From 997e8c9bf9c9e32ac78d07ca5f8dbf478b443203 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 22 Sep 2025 10:48:07 +0000 Subject: [PATCH 01/40] Initial benchmark version --- .gitignore | 3 + spec/performance/bench.sh | 177 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 180 insertions(+) create mode 100755 spec/performance/bench.sh diff --git a/.gitignore b/.gitignore index 2f2ceb077c..4c6eb2b14c 100644 --- a/.gitignore +++ b/.gitignore @@ -76,6 +76,9 @@ react_on_rails/spec/dummy/**/*.res.js react_on_rails_pro/spec/dummy/.bsb.lock react_on_rails_pro/spec/dummy/**/*.res.js +# Performance test results +/bench_results + # Generated by ROR FS-based Registry generated diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh new file mode 100755 index 0000000000..a0f6f1de40 --- /dev/null +++ b/spec/performance/bench.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash +set -euo pipefail +#set -x # Uncomment for debugging commands + +# Benchmark parameters +TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}" +# requests per second; if "max" will get maximum number of queries instead of a fixed rate +RATE=${RATE:-50} +# virtual users for k6 +VUS=${VUS:-100} +DURATION_SEC=${DURATION_SEC:-10} +DURATION="${DURATION_SEC}s" +# Tools to run (comma-separated) +TOOLS=${TOOLS:-fortio,vegeta,k6} + +# Validate input parameters +if ! { [ "$RATE" = "max" ] || { [[ "$RATE" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$RATE > 0") )); }; }; then + echo "Error: RATE must be 'max' or a positive number (got: '$RATE')" >&2 + exit 1 +fi +if ! { [[ "$VUS" =~ ^[0-9]+$ ]] && [ "$VUS" -gt 0 ]; }; then + echo "Error: VUS must be a positive integer (got: '$VUS')" >&2 + exit 1 +fi +if ! { [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION_SEC > 0") )); }; then + echo "Error: DURATION_SEC must be a positive number (got: '$DURATION_SEC')" >&2 + exit 1 +fi + +OUTDIR="bench_results" + +# Precompute checks for each tool +RUN_FORTIO=0 +RUN_VEGETA=0 +RUN_K6=0 +[[ ",$TOOLS," == *",fortio,"* ]] && RUN_FORTIO=1 +[[ ",$TOOLS," == *",vegeta,"* ]] && RUN_VEGETA=1 +[[ ",$TOOLS," == *",k6,"* ]] && RUN_K6=1 + +for cmd in ${TOOLS//,/ } jq column awk tee bc; do + if ! 
command -v "$cmd" >/dev/null 2>&1; then + echo "Error: required tool '$cmd' is not installed" >&2 + exit 1 + fi +done + +TIMEOUT_SEC=60 +START=$(date +%s) +until curl -fsS "$TARGET" >/dev/null; do + if (( $(date +%s) - START > TIMEOUT_SEC )); then + echo "Error: Target $TARGET not responding within ${TIMEOUT_SEC}s" >&2 + exit 1 + fi + sleep 1 +done + +mkdir -p "$OUTDIR" + +if [ "$RATE" = "max" ]; then + FORTIO_ARGS=(-qps 0) + VEGETA_ARGS=(-rate=infinity) + K6_SCENARIOS="{ + max_rate: { + executor: 'shared-iterations', + vus: $VUS, + iterations: $((VUS * DURATION_SEC * 10)), + maxDuration: '$DURATION' + } + }" +else + FORTIO_ARGS=(-qps "$RATE" -uniform) + VEGETA_ARGS=(-rate="$RATE") + K6_SCENARIOS="{ + constant_rate: { + executor: 'constant-arrival-rate', + rate: $RATE, + timeUnit: '1s', + duration: '$DURATION', + preAllocatedVUs: $VUS, + maxVUs: $((VUS * 10)) + } + }" +fi + +if (( RUN_FORTIO )); then + echo "===> Fortio" + # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass + fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout 30s -json "$OUTDIR/fortio.json" "$TARGET" \ + | tee "$OUTDIR/fortio.txt" +fi + +if (( RUN_VEGETA )); then + echo + echo "===> Vegeta" + echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" \ + | tee "$OUTDIR/vegeta.bin" \ + | vegeta report | tee "$OUTDIR/vegeta.txt" + vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json" +fi + +if (( RUN_K6 )); then + echo + echo "===> k6" + cat < "$OUTDIR/k6_test.js" +import http from 'k6/http'; +import { check } from 'k6'; + +export const options = { + scenarios: $K6_SCENARIOS, +}; + +export default function () { + const response = http.get('$TARGET'); + check(response, { + 'status=200': r => r.status === 200, + // you can add more if needed: + // 'status=500': r => r.status === 500, + }); +} +EOF + + k6 run --summary-export="$OUTDIR/k6_summary.json" --summary-trend-stats "min,avg,med,max,p(90),p(99)" "$OUTDIR/k6_test.js" | tee "$OUTDIR/k6.txt" +fi + +echo +echo "===> Parsing results and generating summary" + +echo -e "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus" > "$OUTDIR/summary.txt" + +if (( RUN_FORTIO )); then + FORTIO_RPS=$(jq '.ActualQPS' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_P50=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==50) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_P90=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==90) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_P99=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==99) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_STATUS=$(jq -r '.RetCodes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/fortio.json") + echo -e "Fortio\t$FORTIO_RPS\t$FORTIO_P50\t$FORTIO_P90\t$FORTIO_P99\t$FORTIO_STATUS" >> "$OUTDIR/summary.txt" +fi + +if (( RUN_VEGETA )); then + # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period + VEGETA_RPS=$(jq '.throughput' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_P50=$(jq '.latencies["50th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_P90=$(jq '.latencies["90th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_P99=$(jq '.latencies["99th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_STATUS=$(jq -r '.status_codes | to_entries | map("\(.key)=\(.value)") | join(",")' 
"$OUTDIR/vegeta.json") + echo -e "Vegeta\t$VEGETA_RPS\t$VEGETA_P50\t$VEGETA_P90\t$VEGETA_P99\t$VEGETA_STATUS" >> "$OUTDIR/summary.txt" +fi + +if (( RUN_K6 )); then + K6_RPS=$(jq '.metrics.iterations.rate' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P50=$(jq '.metrics.http_req_duration.med' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P90=$(jq '.metrics.http_req_duration["p(90)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P99=$(jq '.metrics.http_req_duration["p(99)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + # Status: compute successful vs failed requests + K6_REQS_TOTAL=$(jq '.metrics.http_reqs.count' "$OUTDIR/k6_summary.json") + K6_STATUS=$(jq -r ' + .root_group.checks + | to_entries + | map(.key[7:] + "=" + (.value.passes|tostring)) + | join(",") + ' "$OUTDIR/k6_summary.json") + K6_REQS_KNOWN_STATUS=$(jq -r ' + .root_group.checks + | to_entries + | map(.value.passes) + | add + ' "$OUTDIR/k6_summary.json") + K6_REQS_OTHER=$(( K6_REQS_TOTAL - K6_REQS_KNOWN_STATUS )) + if [ "$K6_REQS_OTHER" -gt 0 ]; then + K6_STATUS="$K6_STATUS,other=$K6_REQS_OTHER" + fi + echo -e "k6\t$K6_RPS\t$K6_P50\t$K6_P90\t$K6_P99\t$K6_STATUS" >> "$OUTDIR/summary.txt" +fi + +echo +echo "Summary saved to $OUTDIR/summary.txt" +column -t -s $'\t' "$OUTDIR/summary.txt" From aefdd45b98ced34a874f6e50ac5d9f7cf883d026 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Thu, 2 Oct 2025 14:27:53 +0000 Subject: [PATCH 02/40] Add production scripts --- react_on_rails/spec/dummy/bin/prod | 29 +++++++++++++++++++++++ react_on_rails/spec/dummy/bin/prod-assets | 9 +++++++ 2 files changed, 38 insertions(+) create mode 100755 react_on_rails/spec/dummy/bin/prod create mode 100755 react_on_rails/spec/dummy/bin/prod-assets diff --git a/react_on_rails/spec/dummy/bin/prod b/react_on_rails/spec/dummy/bin/prod new file mode 100755 index 0000000000..35d0d355ce --- /dev/null +++ b/react_on_rails/spec/dummy/bin/prod @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Run only after ./prod-assets + +# Check if assets are precompiled +MANIFEST="public/webpack/production/manifest.json" + +if [ ! -d "public/assets" ]; then + echo "ERROR: public/assets not found. Run ./bin/prod-assets first" + exit 1 +fi + +if [ ! -f "$MANIFEST" ]; then + echo "ERROR: $MANIFEST not found. 
Run ./bin/prod-assets first" + exit 1 +fi + +# Simple up-to-date check: warn if source files are newer than manifest.json +if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then + echo "WARNING: client or config has changes newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then + echo "WARNING: yarn.lock is newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +NODE_ENV=production RAILS_ENV=production bundle exec rails server -p 3001 diff --git a/react_on_rails/spec/dummy/bin/prod-assets b/react_on_rails/spec/dummy/bin/prod-assets new file mode 100755 index 0000000000..cf493134fa --- /dev/null +++ b/react_on_rails/spec/dummy/bin/prod-assets @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +export NODE_ENV=production +export RAILS_ENV=production +if [ "$CI" = "true" ]; then + bundle exec bootsnap precompile --gemfile app/ lib/ config/ +fi +yarn run build:rescript +bundle exec rails assets:precompile From 7833460983310c5f17b382a7f57ff2105b711216 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 3 Oct 2025 16:17:25 +0000 Subject: [PATCH 03/40] Initial benchmark workflow --- .github/workflows/benchmark.yml | 354 ++++++++++++++++++++++++++++++++ 1 file changed, 354 insertions(+) create mode 100644 .github/workflows/benchmark.yml diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 0000000000..cd2ea762ff --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,354 @@ +name: Benchmark Workflow + +on: + # https://github.com/mxschmitt/action-tmate?tab=readme-ov-file#manually-triggered-debug + workflow_dispatch: + inputs: + debug_enabled: + description: 'Enable SSH access (⚠️ Security Risk - read workflow comments)' + required: false + default: false + type: boolean + rate: + description: 'Requests per second (use "max" for maximum throughput)' + required: false + default: '50' + type: string + duration_sec: + description: 'Duration in seconds' + required: false + default: 10 + type: number + vus: + description: 'Virtual users for k6' + required: false + default: 100 + type: number + tools: + description: 'Comma-separated list of tools to run' + required: false + default: 'fortio,vegeta,k6' + type: string + push: + branches: + - master + paths-ignore: + - '**.md' + - 'docs/**' + pull_request: + paths-ignore: + - '**.md' + - 'docs/**' +env: + FORTIO_VERSION: "1.73.0" + K6_VERSION: "1.3.0" + VEGETA_VERSION: "12.13.0" + # Benchmark parameters + RATE: ${{ github.event.inputs.rate || '50' }} + DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }} + VUS: ${{ github.event.inputs.vus || '100' }} + TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} + +jobs: + benchmark: + runs-on: ubuntu-latest + + steps: + # ============================================ + # STEP 1: CHECKOUT CODE + # ============================================ + - name: Checkout repository + uses: actions/checkout@v4 + + # ============================================ + # STEP 2: OPTIONAL SSH ACCESS + # ============================================ + # NOTE: Interactive confirmation is not possible in GitHub Actions. + # As a secure workaround, SSH access is gated by the workflow_dispatch + # input variable 'debug_enabled' which defaults to false. + # Users must explicitly set this to true to enable SSH. 
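+      # For example, a manual dispatch with SSH enabled might look like this
+      # (assuming the GitHub CLI is installed locally):
+      #   gh workflow run benchmark.yml -f debug_enabled=true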
+ + - name: SSH Warning + if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }} + run: | + echo "⚠️ ⚠️ ⚠️ SSH ACCESS ENABLED ⚠️ ⚠️ ⚠️" + echo "" + echo "SECURITY NOTICE:" + echo " - SSH access exposes your GitHub Actions runner" + echo " - Only proceed if you understand and accept the risks" + echo " - Do NOT store secrets or sensitive data on the runner" + echo " - Access is limited to the workflow initiator only" + echo " - The session will remain open until manually terminated" + echo "" + echo "⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️" + + - name: Setup SSH access (if enabled) + if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }} + uses: mxschmitt/action-tmate@v3 + with: + detached: true + limit-access-to-actor: true # Only workflow trigger can access + + # ============================================ + # STEP 3: INSTALL BENCHMARKING TOOLS + # ============================================ + + - name: Add tools directory to PATH + run: | + mkdir -p ~/bin + echo "$HOME/bin" >> $GITHUB_PATH + + - name: Cache Fortio binary + id: cache-fortio + uses: actions/cache@v4 + with: + path: ~/bin/fortio + key: fortio-${{ runner.os }}-${{ runner.arch }}-${{ env.FORTIO_VERSION }} + + - name: Install Fortio + if: steps.cache-fortio.outputs.cache-hit != 'true' + run: | + echo "📦 Installing Fortio v${FORTIO_VERSION}" + + # Download and extract fortio binary + wget -q https://github.com/fortio/fortio/releases/download/v${FORTIO_VERSION}/fortio-linux_amd64-${FORTIO_VERSION}.tgz + tar -xzf fortio-linux_amd64-${FORTIO_VERSION}.tgz + + # Store in cache directory + mv usr/bin/fortio ~/bin/ + + - name: Cache Vegeta binary + id: cache-vegeta + uses: actions/cache@v4 + with: + path: ~/bin/vegeta + key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }} + + - name: Install Vegeta + if: steps.cache-vegeta.outputs.cache-hit != 'true' + run: | + echo "📦 Installing Vegeta v${VEGETA_VERSION}" + + # Download and extract vegeta binary + wget -q https://github.com/tsenart/vegeta/releases/download/v${VEGETA_VERSION}/vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz + tar -xzf vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz + + # Store in cache directory + mv vegeta ~/bin/ + + - name: Setup k6 + uses: grafana/setup-k6-action@v1 + with: + k6-version: ${{ env.K6_VERSION }} + + # ============================================ + # STEP 4: START APPLICATION SERVER + # ============================================ + + - name: Setup Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.4' + bundler: 2.5.9 + + - name: Cache foreman gem + id: cache-foreman + uses: actions/cache@v4 + with: + path: ~/.gem + key: foreman-gem-${{ runner.os }}-ruby-3.3.7 + + - name: Install foreman + if: steps.cache-foreman.outputs.cache-hit != 'true' + run: gem install foreman + + - name: Fix dependency for libyaml-dev + run: sudo apt install libyaml-dev -y + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: '22' + cache: yarn + cache-dependency-path: '**/yarn.lock' + + - name: Print system information + run: | + echo "Linux release: "; cat /etc/issue + echo "Current user: "; whoami + echo "Current directory: "; pwd + echo "Ruby version: "; ruby -v + echo "Node version: "; node -v + echo "Yarn version: "; yarn --version + echo "Bundler version: "; bundle --version + + - name: Install Node modules with Yarn for renderer package + run: | + yarn install --no-progress --no-emoji --frozen-lockfile + npm install --global yalc + + - name: 
yalc publish for react-on-rails + run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish + + - name: yalc add react-on-rails + run: cd spec/dummy && yalc add react-on-rails + + - name: Install Node modules with Yarn for dummy app + run: cd spec/dummy && yarn install --no-progress --no-emoji + + - name: Save dummy app ruby gems to cache + uses: actions/cache@v4 + with: + path: spec/dummy/vendor/bundle + key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} + + - name: Install Ruby Gems for dummy app + run: | + cd spec/dummy + bundle lock --add-platform 'x86_64-linux' + if ! bundle check --path=vendor/bundle; then + bundle _2.5.9_ install --path=vendor/bundle --jobs=4 --retry=3 + fi + + - name: generate file system-based packs + run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs + + - name: Prepare production assets + run: | + set -e # Exit on any error + echo "🔨 Building production assets..." + cd spec/dummy + + if ! bin/prod-assets; then + echo "❌ ERROR: Failed to build production assets" + exit 1 + fi + + echo "✅ Production assets built successfully" + + - name: Start production server + run: | + set -e # Exit on any error + echo "🚀 Starting production server..." + cd spec/dummy + + # Start server in background + bin/prod & + echo "Server started in background" + + # Wait for server to be ready (max 30 seconds) + echo "⏳ Waiting for server to be ready..." + for i in {1..30}; do + if curl -fsS http://localhost:3001 > /dev/null; then + echo "✅ Server is ready and responding" + exit 0 + fi + echo " Attempt $i/30: Server not ready yet..." + sleep 1 + done + + echo "❌ ERROR: Server failed to start within 30 seconds" + exit 1 + + # ============================================ + # STEP 5: RUN BENCHMARKS + # ============================================ + + - name: Execute benchmark suite + timeout-minutes: 20 + run: | + set -e # Exit on any error + echo "🏃 Running benchmark suite..." + echo "Script: spec/performance/bench.sh" + echo "" + echo "Benchmark parameters:" + echo " - RATE: ${RATE}" + echo " - DURATION_SEC: ${DURATION_SEC}" + echo " - VUS: ${VUS}" + echo " - TOOLS: ${TOOLS}" + echo "" + + if ! spec/performance/bench.sh; then + echo "❌ ERROR: Benchmark execution failed" + exit 1 + fi + + echo "✅ Benchmark suite completed successfully" + + - name: Validate benchmark results + run: | + set -e # Exit on any error + echo "🔍 Validating benchmark output files..." + + RESULTS_DIR="bench_results" + REQUIRED_FILES=("summary.txt") + MISSING_FILES=() + + # Check if results directory exists + if [ ! -d "${RESULTS_DIR}" ]; then + echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + exit 1 + fi + + # List all generated files + echo "Generated files:" + ls -lh ${RESULTS_DIR}/ || true + echo "" + + # Check for required files + for file in "${REQUIRED_FILES[@]}"; do + if [ ! -f "${RESULTS_DIR}/${file}" ]; then + MISSING_FILES+=("${file}") + fi + done + + # Report validation results + if [ ${#MISSING_FILES[@]} -eq 0 ]; then + echo "✅ All required benchmark output files present" + echo "📊 Summary preview:" + head -20 ${RESULTS_DIR}/summary.txt || true + else + echo "⚠️ WARNING: Some required files are missing:" + printf ' - %s\n' "${MISSING_FILES[@]}" + echo "Continuing with available results..." 
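+            # Only summary.txt is treated as required here; the per-tool outputs
+            # (fortio, vegeta, and k6 JSON/TXT files) depend on which TOOLS were enabled.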
+ fi + + # ============================================ + # STEP 6: COLLECT BENCHMARK RESULTS + # ============================================ + + - name: Upload benchmark results + uses: actions/upload-artifact@v4 + if: always() # Upload even if benchmark fails + with: + name: benchmark-results-${{ github.run_number }} + path: bench_results/ + retention-days: 30 + if-no-files-found: warn + + - name: Verify artifact upload + if: success() + run: | + echo "✅ Benchmark results uploaded as workflow artifacts" + echo "📦 Artifact name: benchmark-results-${{ github.run_number }}" + echo "🔗 Access artifacts from the Actions tab in GitHub" + + # ============================================ + # WORKFLOW COMPLETION + # ============================================ + + - name: Workflow summary + if: always() + run: | + echo "📋 Benchmark Workflow Summary" + echo "==============================" + echo "Status: ${{ job.status }}" + echo "Run number: ${{ github.run_number }}" + echo "Triggered by: ${{ github.actor }}" + echo "Branch: ${{ github.ref_name }}" + echo "" + if [ "${{ job.status }}" == "success" ]; then + echo "✅ All steps completed successfully" + else + echo "❌ Workflow encountered errors - check logs above" + fi From 571f8b8a61a079c67a8fd3002a424285a826b557 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 12:49:50 +0000 Subject: [PATCH 04/40] Add server warm-up to benchmark --- spec/performance/bench.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index a0f6f1de40..ebafa55080 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -54,6 +54,13 @@ until curl -fsS "$TARGET" >/dev/null; do sleep 1 done +echo "Warming up server with 10 requests..." +for i in {1..10}; do + curl -fsS "$TARGET" >/dev/null || true + sleep 0.5 +done +echo "Warm-up complete" + mkdir -p "$OUTDIR" if [ "$RATE" = "max" ]; then From 5c518e9f08250f8f18867c0d64b8111a0d95ea8e Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 13:13:44 +0000 Subject: [PATCH 05/40] Make request timeout a parameter --- .github/workflows/benchmark.yml | 6 ++++++ spec/performance/bench.sh | 13 +++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index cd2ea762ff..28bc7d6e9e 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -19,6 +19,11 @@ on: required: false default: 10 type: number + request_timeout: + description: 'Request timeout (e.g., "60s", "1m", "90s")' + required: false + default: '60s' + type: string vus: description: 'Virtual users for k6' required: false @@ -46,6 +51,7 @@ env: # Benchmark parameters RATE: ${{ github.event.inputs.rate || '50' }} DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }} + REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} VUS: ${{ github.event.inputs.vus || '100' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index ebafa55080..5227e1080d 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -10,6 +10,8 @@ RATE=${RATE:-50} VUS=${VUS:-100} DURATION_SEC=${DURATION_SEC:-10} DURATION="${DURATION_SEC}s" +# request timeout (duration string like "60s", "1m", "90s") +REQUEST_TIMEOUT=${REQUEST_TIMEOUT:-60s} # Tools to run (comma-separated) TOOLS=${TOOLS:-fortio,vegeta,k6} @@ -26,6 +28,10 @@ if ! 
{ [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION echo "Error: DURATION_SEC must be a positive number (got: '$DURATION_SEC')" >&2 exit 1 fi +if ! [[ "$REQUEST_TIMEOUT" =~ ^([0-9]+(\.[0-9]+)?[smh])+$ ]]; then + echo "Error: REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '$REQUEST_TIMEOUT')" >&2 + exit 1 +fi OUTDIR="bench_results" @@ -92,14 +98,14 @@ fi if (( RUN_FORTIO )); then echo "===> Fortio" # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout 30s -json "$OUTDIR/fortio.json" "$TARGET" \ + fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout "$REQUEST_TIMEOUT" -json "$OUTDIR/fortio.json" "$TARGET" \ | tee "$OUTDIR/fortio.txt" fi if (( RUN_VEGETA )); then echo echo "===> Vegeta" - echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" \ + echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" -timeout="$REQUEST_TIMEOUT" \ | tee "$OUTDIR/vegeta.bin" \ | vegeta report | tee "$OUTDIR/vegeta.txt" vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json" @@ -114,6 +120,9 @@ import { check } from 'k6'; export const options = { scenarios: $K6_SCENARIOS, + httpReq: { + timeout: '$REQUEST_TIMEOUT', + }, }; export default function () { From a4609d34d33208454925c8fb949d990f3fd3f154 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 13:27:16 +0000 Subject: [PATCH 06/40] Update defaults for now --- .github/workflows/benchmark.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 28bc7d6e9e..ea92e799aa 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -12,12 +12,12 @@ on: rate: description: 'Requests per second (use "max" for maximum throughput)' required: false - default: '50' + default: 'max' type: string duration_sec: description: 'Duration in seconds' required: false - default: 10 + default: 30 type: number request_timeout: description: 'Request timeout (e.g., "60s", "1m", "90s")' @@ -49,8 +49,8 @@ env: K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" # Benchmark parameters - RATE: ${{ github.event.inputs.rate || '50' }} - DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }} + RATE: ${{ github.event.inputs.rate || 'max' }} + DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} VUS: ${{ github.event.inputs.vus || '100' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} From cfe93e6b80ece07efb4eacb2aae65611830a6c7e Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 13:39:08 +0000 Subject: [PATCH 07/40] Fix knip error --- knip.ts | 3 +++ 1 file changed, 3 insertions(+) diff --git a/knip.ts b/knip.ts index 6cc0239ee3..29d76fd106 100644 --- a/knip.ts +++ b/knip.ts @@ -13,6 +13,9 @@ const config: KnipConfig = { // Pro package binaries used in Pro workflows 'playwright', 'e2e-test', + // Local binaries + 'bin/.*', + 'spec/performance/bench.sh', ], ignore: ['react_on_rails_pro/**', 'react_on_rails/vendor/**'], ignoreDependencies: [ From 6e7c8479d3604e3dde0656385ce78506958d2985 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:04:18 +0000 Subject: [PATCH 08/40] Enable clustered mode in production --- .github/workflows/benchmark.yml | 8 +++++ react_on_rails/spec/dummy/config/puma.rb | 41 +++++++++++++++--------- 2 files 
changed, 33 insertions(+), 16 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ea92e799aa..ab925d3841 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -34,6 +34,11 @@ on: required: false default: 'fortio,vegeta,k6' type: string + web_concurrency: + description: 'Number of Puma worker processes' + required: false + default: 2 + type: number push: branches: - master @@ -54,6 +59,7 @@ env: REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} VUS: ${{ github.event.inputs.vus || '100' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} + WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} jobs: benchmark: @@ -270,8 +276,10 @@ jobs: echo "Benchmark parameters:" echo " - RATE: ${RATE}" echo " - DURATION_SEC: ${DURATION_SEC}" + echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" echo " - VUS: ${VUS}" echo " - TOOLS: ${TOOLS}" + echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" echo "" if ! spec/performance/bench.sh; then diff --git a/react_on_rails/spec/dummy/config/puma.rb b/react_on_rails/spec/dummy/config/puma.rb index de5feec982..01b93c7d91 100644 --- a/react_on_rails/spec/dummy/config/puma.rb +++ b/react_on_rails/spec/dummy/config/puma.rb @@ -10,10 +10,12 @@ min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count } threads min_threads_count, max_threads_count +rails_env = ENV.fetch("RAILS_ENV", "development") + # Specifies the `worker_timeout` threshold that Puma will use to wait before # terminating a worker in development environments. # -worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development" +worker_timeout 3600 if rails_env == "development" # Specifies the `port` that Puma will listen on to receive requests; default is 3000. # @@ -21,25 +23,32 @@ # Specifies the `environment` that Puma will run in. # -environment ENV.fetch("RAILS_ENV", "development") +environment rails_env # Specifies the `pidfile` that Puma will use. pidfile ENV.fetch("PIDFILE", "tmp/pids/server.pid") -# Specifies the number of `workers` to boot in clustered mode. -# Workers are forked web server processes. If using threads and workers together -# the concurrency of the application would be max `threads` * `workers`. -# Workers do not work on JRuby or Windows (both of which do not support -# processes). -# -# workers ENV.fetch("WEB_CONCURRENCY") { 2 } - -# Use the `preload_app!` method when specifying a `workers` number. -# This directive tells Puma to first boot the application and load code -# before forking the application. This takes advantage of Copy On Write -# process behavior so workers use less memory. -# -# preload_app! +if rails_env == "production" + # Specifies the number of `workers` to boot in clustered mode. + # Workers are forked web server processes. If using threads and workers together + # the concurrency of the application would be max `threads` * `workers`. + # Workers do not work on JRuby or Windows (both of which do not support + # processes). + # + workers ENV.fetch("WEB_CONCURRENCY", 2) + + # Use the `preload_app!` method when specifying a `workers` number. + # This directive tells Puma to first boot the application and load code + # before forking the application. This takes advantage of Copy On Write + # process behavior so workers use less memory. + # + preload_app! + + # Specifies the `worker_shutdown_timeout` threshold that Puma will use to wait before + # terminating a worker. 
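+  # (Puma's default is 30 seconds; 60 presumably gives slow server-rendered
+  # requests more time to finish while a worker is being shut down.)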
+ # + worker_shutdown_timeout 60 +end # Allow puma to be restarted by `bin/rails restart` command. plugin :tmp_restart From ff915e5348213d392a695ce4aa162987b194448a Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:44:21 +0000 Subject: [PATCH 09/40] Add MAX_CONNECTIONS --- .github/workflows/benchmark.yml | 16 +++++++++++----- spec/performance/bench.sh | 34 +++++++++++++++++++++------------ 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ab925d3841..16d2b18c62 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -24,10 +24,14 @@ on: required: false default: '60s' type: string - vus: - description: 'Virtual users for k6' + connections: + description: 'Concurrent connections/virtual users' + required: false + default: 10 + type: number + max_connections: + description: 'Maximum connections/virtual users' required: false - default: 100 type: number tools: description: 'Comma-separated list of tools to run' @@ -57,7 +61,8 @@ env: RATE: ${{ github.event.inputs.rate || 'max' }} DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} - VUS: ${{ github.event.inputs.vus || '100' }} + CONNECTIONS: ${{ github.event.inputs.connections || '10' }} + MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || '10' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} @@ -277,7 +282,8 @@ jobs: echo " - RATE: ${RATE}" echo " - DURATION_SEC: ${DURATION_SEC}" echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" - echo " - VUS: ${VUS}" + echo " - CONNECTIONS: ${CONNECTIONS}" + echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" echo " - TOOLS: ${TOOLS}" echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" echo "" diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index 5227e1080d..dca256fbf4 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -6,8 +6,10 @@ set -euo pipefail TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}" # requests per second; if "max" will get maximum number of queries instead of a fixed rate RATE=${RATE:-50} -# virtual users for k6 -VUS=${VUS:-100} +# concurrent connections/virtual users +CONNECTIONS=${CONNECTIONS:-10} +# maximum connections/virtual users +MAX_CONNECTIONS=${MAX_CONNECTIONS:-$CONNECTIONS} DURATION_SEC=${DURATION_SEC:-10} DURATION="${DURATION_SEC}s" # request timeout (duration string like "60s", "1m", "90s") @@ -20,8 +22,12 @@ if ! { [ "$RATE" = "max" ] || { [[ "$RATE" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc echo "Error: RATE must be 'max' or a positive number (got: '$RATE')" >&2 exit 1 fi -if ! { [[ "$VUS" =~ ^[0-9]+$ ]] && [ "$VUS" -gt 0 ]; }; then - echo "Error: VUS must be a positive integer (got: '$VUS')" >&2 +if ! { [[ "$CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$CONNECTIONS" -gt 0 ]; }; then + echo "Error: CONNECTIONS must be a positive integer (got: '$CONNECTIONS')" >&2 + exit 1 +fi +if ! { [[ "$MAX_CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$MAX_CONNECTIONS" -gt 0 ]; }; then + echo "Error: MAX_CONNECTIONS must be a positive integer (got: '$MAX_CONNECTIONS')" >&2 exit 1 fi if ! 
{ [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION_SEC > 0") )); }; then @@ -70,27 +76,31 @@ echo "Warm-up complete" mkdir -p "$OUTDIR" if [ "$RATE" = "max" ]; then - FORTIO_ARGS=(-qps 0) - VEGETA_ARGS=(-rate=infinity) + if [ "$CONNECTIONS" != "$MAX_CONNECTIONS" ]; then + echo "For RATE=max, CONNECTIONS (got $CONNECTIONS) and MAX_CONNECTIONS (got $MAX_CONNECTIONS) should be the same" + exit 1 + fi + FORTIO_ARGS=(-qps 0 -c "$CONNECTIONS") + VEGETA_ARGS=(-rate=infinity --workers="$CONNECTIONS" --max-workers="$CONNECTIONS") K6_SCENARIOS="{ max_rate: { executor: 'shared-iterations', - vus: $VUS, - iterations: $((VUS * DURATION_SEC * 10)), + vus: $CONNECTIONS, + iterations: $((CONNECTIONS * DURATION_SEC * 10)), maxDuration: '$DURATION' } }" else - FORTIO_ARGS=(-qps "$RATE" -uniform) - VEGETA_ARGS=(-rate="$RATE") + FORTIO_ARGS=(-qps "$RATE" -uniform -c "$CONNECTIONS") + VEGETA_ARGS=(-rate="$RATE" --workers="$CONNECTIONS" --max-workers="$MAX_CONNECTIONS") K6_SCENARIOS="{ constant_rate: { executor: 'constant-arrival-rate', rate: $RATE, timeUnit: '1s', duration: '$DURATION', - preAllocatedVUs: $VUS, - maxVUs: $((VUS * 10)) + preAllocatedVUs: $CONNECTIONS, + maxVUs: $MAX_CONNECTIONS } }" fi From 7b36b52edfd934cb0dcfb73cdc279c5074f08ae9 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:45:22 +0000 Subject: [PATCH 10/40] Fix max rate K6 scenario --- spec/performance/bench.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index dca256fbf4..d147c3e071 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -84,10 +84,9 @@ if [ "$RATE" = "max" ]; then VEGETA_ARGS=(-rate=infinity --workers="$CONNECTIONS" --max-workers="$CONNECTIONS") K6_SCENARIOS="{ max_rate: { - executor: 'shared-iterations', + executor: 'constant-vus', vus: $CONNECTIONS, - iterations: $((CONNECTIONS * DURATION_SEC * 10)), - maxDuration: '$DURATION' + duration: '$DURATION' } }" else From 06c141db6c09b6b546dfd07ae93b7cb9cc62e36a Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:52:55 +0000 Subject: [PATCH 11/40] Reorder workflow parameters more logically --- .github/workflows/benchmark.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 16d2b18c62..4f052bda4c 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -33,16 +33,16 @@ on: description: 'Maximum connections/virtual users' required: false type: number - tools: - description: 'Comma-separated list of tools to run' - required: false - default: 'fortio,vegeta,k6' - type: string web_concurrency: description: 'Number of Puma worker processes' required: false default: 2 type: number + tools: + description: 'Comma-separated list of tools to run' + required: false + default: 'fortio,vegeta,k6' + type: string push: branches: - master @@ -63,8 +63,8 @@ env: REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} CONNECTIONS: ${{ github.event.inputs.connections || '10' }} MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || '10' }} - TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} + TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: benchmark: @@ -284,8 +284,8 @@ jobs: echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" echo " - CONNECTIONS: 
${CONNECTIONS}" echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" - echo " - TOOLS: ${TOOLS}" echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" + echo " - TOOLS: ${TOOLS}" echo "" if ! spec/performance/bench.sh; then From cc4f4db4166a49748eeeef6844441f744ba55977 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 15:15:39 +0000 Subject: [PATCH 12/40] Closer to recommended Fortio options --- spec/performance/bench.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index d147c3e071..b8a5be9256 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -90,7 +90,7 @@ if [ "$RATE" = "max" ]; then } }" else - FORTIO_ARGS=(-qps "$RATE" -uniform -c "$CONNECTIONS") + FORTIO_ARGS=(-qps "$RATE" -uniform -nocatchup -c "$CONNECTIONS") VEGETA_ARGS=(-rate="$RATE" --workers="$CONNECTIONS" --max-workers="$MAX_CONNECTIONS") K6_SCENARIOS="{ constant_rate: { From 745c73c438f4a36300d1e76ffdec010a00d7f6f4 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 15:23:37 +0000 Subject: [PATCH 13/40] Allow configuring RAILS_MAX/MIN_THREADS in the workflow --- .github/workflows/benchmark.yml | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 4f052bda4c..ea64561344 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -36,7 +36,16 @@ on: web_concurrency: description: 'Number of Puma worker processes' required: false - default: 2 + default: 4 + type: number + rails_max_threads: + description: 'Maximum number of Puma threads' + required: false + default: 3 + type: number + rails_min_threads: + description: 'Minimum number of Puma threads (same as maximum if not set)' + required: false type: number tools: description: 'Comma-separated list of tools to run' @@ -61,9 +70,11 @@ env: RATE: ${{ github.event.inputs.rate || 'max' }} DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} - CONNECTIONS: ${{ github.event.inputs.connections || '10' }} - MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || '10' }} - WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} + CONNECTIONS: ${{ github.event.inputs.connections || 10 }} + MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || 10 }} + WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} + RAILS_MAX_THREADS: ${{ github.event.inputs.rails_max_threads || 3 }} + RAILS_MIN_THREADS: ${{ github.event.inputs.rails_min_threads || github.event.inputs.rails_max_threads || 3 }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: @@ -285,6 +296,8 @@ jobs: echo " - CONNECTIONS: ${CONNECTIONS}" echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" + echo " - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS}" + echo " - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS}" echo " - TOOLS: ${TOOLS}" echo "" From 7efe9b65bdbc1c9d4f61bd718fc0d211cc7ec64b Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 15:57:09 +0000 Subject: [PATCH 14/40] Move showing benchmark params to bench.sh for simplicity --- .github/workflows/benchmark.yml | 13 ------------- spec/performance/bench.sh | 12 ++++++++++++ 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml 
index ea64561344..d391830989 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -287,19 +287,6 @@ jobs: run: | set -e # Exit on any error echo "🏃 Running benchmark suite..." - echo "Script: spec/performance/bench.sh" - echo "" - echo "Benchmark parameters:" - echo " - RATE: ${RATE}" - echo " - DURATION_SEC: ${DURATION_SEC}" - echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" - echo " - CONNECTIONS: ${CONNECTIONS}" - echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" - echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" - echo " - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS}" - echo " - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS}" - echo " - TOOLS: ${TOOLS}" - echo "" if ! spec/performance/bench.sh; then echo "❌ ERROR: Benchmark execution failed" diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index b8a5be9256..b83d429497 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -56,6 +56,18 @@ for cmd in ${TOOLS//,/ } jq column awk tee bc; do fi done +echo "Benchmark parameters: + - RATE: ${RATE:-unset} + - DURATION_SEC: ${DURATION_SEC:-unset} + - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT:-unset} + - CONNECTIONS: ${CONNECTIONS:-unset} + - MAX_CONNECTIONS: ${MAX_CONNECTIONS:-unset} + - WEB_CONCURRENCY: ${WEB_CONCURRENCY:-unset} + - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS:-unset} + - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS:-unset} + - TOOLS: ${TOOLS:-unset} +" + TIMEOUT_SEC=60 START=$(date +%s) until curl -fsS "$TARGET" >/dev/null; do From 97fa1cc6a1acf08174c8a38875f50c90c79826c1 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 18:36:03 +0000 Subject: [PATCH 15/40] Convert the benchmark script to Ruby --- .github/workflows/benchmark.yml | 2 +- knip.ts | 1 - spec/performance/bench.rb | 318 ++++++++++++++++++++++++++++++++ spec/performance/bench.sh | 214 --------------------- 4 files changed, 319 insertions(+), 216 deletions(-) create mode 100755 spec/performance/bench.rb delete mode 100755 spec/performance/bench.sh diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index d391830989..e2a2d9661e 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -288,7 +288,7 @@ jobs: set -e # Exit on any error echo "🏃 Running benchmark suite..." - if ! spec/performance/bench.sh; then + if ! 
ruby spec/performance/bench.rb; then echo "❌ ERROR: Benchmark execution failed" exit 1 fi diff --git a/knip.ts b/knip.ts index 29d76fd106..36bfc74ff6 100644 --- a/knip.ts +++ b/knip.ts @@ -15,7 +15,6 @@ const config: KnipConfig = { 'e2e-test', // Local binaries 'bin/.*', - 'spec/performance/bench.sh', ], ignore: ['react_on_rails_pro/**', 'react_on_rails/vendor/**'], ignoreDependencies: [ diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb new file mode 100755 index 0000000000..6f9a9536f4 --- /dev/null +++ b/spec/performance/bench.rb @@ -0,0 +1,318 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require "json" +require "fileutils" +require "net/http" +require "uri" + +# Benchmark parameters +BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") +ROUTE = ENV.fetch("ROUTE", "server_side_hello_world_hooks") +TARGET = URI.parse("http://#{BASE_URL}/#{ROUTE}") +# requests per second; if "max" will get maximum number of queries instead of a fixed rate +RATE = ENV.fetch("RATE", "50") +# concurrent connections/virtual users +CONNECTIONS = ENV.fetch("CONNECTIONS", "10").to_i +# maximum connections/virtual users +MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS.to_s).to_i +DURATION_SEC = ENV.fetch("DURATION_SEC", "10").to_f +DURATION = "#{DURATION_SEC}s".freeze +# request timeout (duration string like "60s", "1m", "90s") +REQUEST_TIMEOUT = ENV.fetch("REQUEST_TIMEOUT", "60s") +# Tools to run (comma-separated) +TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") + +OUTDIR = "bench_results" +FORTIO_JSON = "#{OUTDIR}/fortio.json".freeze +FORTIO_TXT = "#{OUTDIR}/fortio.txt".freeze +VEGETA_BIN = "#{OUTDIR}/vegeta.bin".freeze +VEGETA_JSON = "#{OUTDIR}/vegeta.json".freeze +VEGETA_TXT = "#{OUTDIR}/vegeta.txt".freeze +K6_TEST_JS = "#{OUTDIR}/k6_test.js".freeze +K6_SUMMARY_JSON = "#{OUTDIR}/k6_summary.json".freeze +K6_TXT = "#{OUTDIR}/k6.txt".freeze +SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze + +# Validate input parameters +def validate_rate(rate) + return if rate == "max" + + return if rate.match?(/^\d+(\.\d+)?$/) && rate.to_f.positive? + + raise "RATE must be 'max' or a positive number (got: '#{rate}')" +end + +def validate_positive_integer(value, name) + return if value.is_a?(Integer) && value.positive? + + raise "#{name} must be a positive integer (got: '#{value}')" +end + +def validate_duration(value, name) + return if value.is_a?(Numeric) && value.positive? 
+ + raise "#{name} must be a positive number (got: '#{value}')" +end + +def validate_timeout(value) + return if value.match?(/^(\d+(\.\d+)?[smh])+$/) + + raise "REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '#{value}')" +end + +def parse_json_file(file_path, tool_name) + JSON.parse(File.read(file_path)) +rescue Errno::ENOENT + raise "#{tool_name} results file not found: #{file_path}" +rescue JSON::ParserError => e + raise "Failed to parse #{tool_name} JSON: #{e.message}" +rescue StandardError => e + raise "Failed to read #{tool_name} results: #{e.message}" +end + +validate_rate(RATE) +validate_positive_integer(CONNECTIONS, "CONNECTIONS") +validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") +validate_duration(DURATION_SEC, "DURATION_SEC") +validate_timeout(REQUEST_TIMEOUT) + +raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS + +# Precompute checks for each tool +run_fortio = TOOLS.include?("fortio") +run_vegeta = TOOLS.include?("vegeta") +run_k6 = TOOLS.include?("k6") + +# Check required tools are installed +required_tools = TOOLS + %w[column tee] +required_tools.each do |cmd| + raise "required tool '#{cmd}' is not installed" unless system("command -v #{cmd} >/dev/null 2>&1") +end + +puts <<~PARAMS + Benchmark parameters: + - RATE: #{RATE} + - DURATION_SEC: #{DURATION_SEC} + - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} + - CONNECTIONS: #{CONNECTIONS} + - MAX_CONNECTIONS: #{MAX_CONNECTIONS} + - WEB_CONCURRENCY: #{ENV['WEB_CONCURRENCY'] || 'unset'} + - RAILS_MAX_THREADS: #{ENV['RAILS_MAX_THREADS'] || 'unset'} + - RAILS_MIN_THREADS: #{ENV['RAILS_MIN_THREADS'] || 'unset'} + - TOOLS: #{TOOLS.join(', ')} +PARAMS + +# Helper method to check if server is responding +def server_responding?(uri) + response = Net::HTTP.get_response(uri) + response.is_a?(Net::HTTPSuccess) +rescue StandardError + false +end + +# Wait for the server to be ready +TIMEOUT_SEC = 60 +start_time = Time.now +loop do + break if server_responding?(TARGET) + + raise "Target #{TARGET} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC + + sleep 1 +end + +# Warm up server +puts "Warming up server with 10 requests..." 
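+# Ten sequential requests with a short pause, so caches and lazily-initialized
+# code paths are primed before measurement; failed warm-up requests are ignored.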
+10.times do + server_responding?(TARGET) + sleep 0.5 +end +puts "Warm-up complete" + +FileUtils.mkdir_p(OUTDIR) + +# Configure tool-specific arguments +if RATE == "max" + if CONNECTIONS != MAX_CONNECTIONS + raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" + end + + fortio_args = ["-qps", 0, "-c", CONNECTIONS] + vegeta_args = ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] + k6_scenarios = <<~JS.strip + { + max_rate: { + executor: 'constant-vus', + vus: #{CONNECTIONS}, + duration: '#{DURATION}' + } + } + JS +else + fortio_args = ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] + vegeta_args = ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] + k6_scenarios = <<~JS.strip + { + constant_rate: { + executor: 'constant-arrival-rate', + rate: #{RATE}, + timeUnit: '1s', + duration: '#{DURATION}', + preAllocatedVUs: #{CONNECTIONS}, + maxVUs: #{MAX_CONNECTIONS} + } + } + JS +end + +# Run Fortio +if run_fortio + puts "===> Fortio" + # TODO: https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass + fortio_cmd = [ + "fortio", "load", + *fortio_args, + "-t", DURATION, + "-timeout", REQUEST_TIMEOUT, + "-json", FORTIO_JSON, + TARGET + ].join(" ") + raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{FORTIO_TXT}") +end + +# Run Vegeta +if run_vegeta + puts "\n===> Vegeta" + vegeta_cmd = [ + "echo", "'GET #{TARGET}'", "|", + "vegeta", "attack", + *vegeta_args, + "-duration=#{DURATION}", + "-timeout=#{REQUEST_TIMEOUT}" + ].join(" ") + raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{VEGETA_BIN} | vegeta report | tee #{VEGETA_TXT}") + raise "Vegeta report generation failed" unless system("vegeta report -type=json #{VEGETA_BIN} > #{VEGETA_JSON}") +end + +# Run k6 +if run_k6 + puts "\n===> k6" + k6_script = <<~JS + import http from 'k6/http'; + import { check } from 'k6'; + + export const options = { + scenarios: #{k6_scenarios}, + httpReq: { + timeout: '#{REQUEST_TIMEOUT}', + }, + }; + + export default function () { + const response = http.get('#{TARGET}'); + check(response, { + 'status=200': r => r.status === 200, + // you can add more if needed: + // 'status=500': r => r.status === 500, + }); + } + JS + File.write(K6_TEST_JS, k6_script) + k6_command = "k6 run --summary-export=#{K6_SUMMARY_JSON} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" + raise "k6 benchmark failed" unless system("#{k6_command} #{K6_TEST_JS} | tee #{K6_TXT}") +end + +puts "\n===> Parsing results and generating summary" + +# Initialize summary file +File.write(SUMMARY_TXT, "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus\n") + +# Parse Fortio results +if run_fortio + begin + fortio_data = parse_json_file(FORTIO_JSON, "Fortio") + fortio_rps = fortio_data["ActualQPS"]&.round(2) || "missing" + + percentiles = fortio_data.dig("DurationHistogram", "Percentiles") || [] + p50_data = percentiles.find { |p| p["Percentile"] == 50 } + p90_data = percentiles.find { |p| p["Percentile"] == 90 } + p99_data = percentiles.find { |p| p["Percentile"] == 99 } + + raise "Fortio results missing percentile data" unless p50_data && p90_data && p99_data + + fortio_p50 = (p50_data["Value"] * 1000).round(2) + fortio_p90 = (p90_data["Value"] * 1000).round(2) + fortio_p99 = (p99_data["Value"] * 1000).round(2) + fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" + File.open(SUMMARY_TXT, "a") do |f| + f.puts 
"Fortio\t#{fortio_rps}\t#{fortio_p50}\t#{fortio_p90}\t#{fortio_p99}\t#{fortio_status}" + end + rescue StandardError => e + puts "Error: #{e.message}" + File.open(SUMMARY_TXT, "a") do |f| + f.puts "Fortio\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" + end + end +end + +# Parse Vegeta results +if run_vegeta + begin + vegeta_data = parse_json_file(VEGETA_JSON, "Vegeta") + # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period + vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" + vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" + vegeta_line = [ + "Vegeta", vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status + ].join("\t") + File.open(SUMMARY_TXT, "a") do |f| + f.puts vegeta_line + end + rescue StandardError => e + puts "Error: #{e.message}" + File.open(SUMMARY_TXT, "a") do |f| + f.puts "Vegeta\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" + end + end +end + +# Parse k6 results +if run_k6 + begin + k6_data = parse_json_file(K6_SUMMARY_JSON, "k6") + k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" + k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing" + k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" + k6_p99 = k6_data.dig("metrics", "http_req_duration", "p(99)")&.round(2) || "missing" + + # Status: compute successful vs failed requests + k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 + k6_checks = k6_data.dig("root_group", "checks") || {} + # Extract status code from check name (e.g., "status=200" -> "200") + # Handle both "status=XXX" format and other potential formats + k6_status_parts = k6_checks.map do |name, check| + status_label = name.start_with?("status=") ? name.delete_prefix("status=") : name + "#{status_label}=#{check['passes']}" + end + k6_reqs_known_status = k6_checks.values.sum { |check| check["passes"] || 0 } + k6_reqs_other = k6_reqs_total - k6_reqs_known_status + k6_status_parts << "other=#{k6_reqs_other}" if k6_reqs_other.positive? + k6_status = k6_status_parts.empty? ? 
"missing" : k6_status_parts.join(",") + + File.open(SUMMARY_TXT, "a") do |f| + f.puts "k6\t#{k6_rps}\t#{k6_p50}\t#{k6_p90}\t#{k6_p99}\t#{k6_status}" + end + rescue StandardError => e + puts "Error: #{e.message}" + File.open(SUMMARY_TXT, "a") do |f| + f.puts "k6\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" + end + end +end + +puts "\nSummary saved to #{SUMMARY_TXT}" +system("column", "-t", "-s", "\t", SUMMARY_TXT) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh deleted file mode 100755 index b83d429497..0000000000 --- a/spec/performance/bench.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail -#set -x # Uncomment for debugging commands - -# Benchmark parameters -TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}" -# requests per second; if "max" will get maximum number of queries instead of a fixed rate -RATE=${RATE:-50} -# concurrent connections/virtual users -CONNECTIONS=${CONNECTIONS:-10} -# maximum connections/virtual users -MAX_CONNECTIONS=${MAX_CONNECTIONS:-$CONNECTIONS} -DURATION_SEC=${DURATION_SEC:-10} -DURATION="${DURATION_SEC}s" -# request timeout (duration string like "60s", "1m", "90s") -REQUEST_TIMEOUT=${REQUEST_TIMEOUT:-60s} -# Tools to run (comma-separated) -TOOLS=${TOOLS:-fortio,vegeta,k6} - -# Validate input parameters -if ! { [ "$RATE" = "max" ] || { [[ "$RATE" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$RATE > 0") )); }; }; then - echo "Error: RATE must be 'max' or a positive number (got: '$RATE')" >&2 - exit 1 -fi -if ! { [[ "$CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$CONNECTIONS" -gt 0 ]; }; then - echo "Error: CONNECTIONS must be a positive integer (got: '$CONNECTIONS')" >&2 - exit 1 -fi -if ! { [[ "$MAX_CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$MAX_CONNECTIONS" -gt 0 ]; }; then - echo "Error: MAX_CONNECTIONS must be a positive integer (got: '$MAX_CONNECTIONS')" >&2 - exit 1 -fi -if ! { [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION_SEC > 0") )); }; then - echo "Error: DURATION_SEC must be a positive number (got: '$DURATION_SEC')" >&2 - exit 1 -fi -if ! [[ "$REQUEST_TIMEOUT" =~ ^([0-9]+(\.[0-9]+)?[smh])+$ ]]; then - echo "Error: REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '$REQUEST_TIMEOUT')" >&2 - exit 1 -fi - -OUTDIR="bench_results" - -# Precompute checks for each tool -RUN_FORTIO=0 -RUN_VEGETA=0 -RUN_K6=0 -[[ ",$TOOLS," == *",fortio,"* ]] && RUN_FORTIO=1 -[[ ",$TOOLS," == *",vegeta,"* ]] && RUN_VEGETA=1 -[[ ",$TOOLS," == *",k6,"* ]] && RUN_K6=1 - -for cmd in ${TOOLS//,/ } jq column awk tee bc; do - if ! command -v "$cmd" >/dev/null 2>&1; then - echo "Error: required tool '$cmd' is not installed" >&2 - exit 1 - fi -done - -echo "Benchmark parameters: - - RATE: ${RATE:-unset} - - DURATION_SEC: ${DURATION_SEC:-unset} - - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT:-unset} - - CONNECTIONS: ${CONNECTIONS:-unset} - - MAX_CONNECTIONS: ${MAX_CONNECTIONS:-unset} - - WEB_CONCURRENCY: ${WEB_CONCURRENCY:-unset} - - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS:-unset} - - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS:-unset} - - TOOLS: ${TOOLS:-unset} -" - -TIMEOUT_SEC=60 -START=$(date +%s) -until curl -fsS "$TARGET" >/dev/null; do - if (( $(date +%s) - START > TIMEOUT_SEC )); then - echo "Error: Target $TARGET not responding within ${TIMEOUT_SEC}s" >&2 - exit 1 - fi - sleep 1 -done - -echo "Warming up server with 10 requests..." 
-for i in {1..10}; do - curl -fsS "$TARGET" >/dev/null || true - sleep 0.5 -done -echo "Warm-up complete" - -mkdir -p "$OUTDIR" - -if [ "$RATE" = "max" ]; then - if [ "$CONNECTIONS" != "$MAX_CONNECTIONS" ]; then - echo "For RATE=max, CONNECTIONS (got $CONNECTIONS) and MAX_CONNECTIONS (got $MAX_CONNECTIONS) should be the same" - exit 1 - fi - FORTIO_ARGS=(-qps 0 -c "$CONNECTIONS") - VEGETA_ARGS=(-rate=infinity --workers="$CONNECTIONS" --max-workers="$CONNECTIONS") - K6_SCENARIOS="{ - max_rate: { - executor: 'constant-vus', - vus: $CONNECTIONS, - duration: '$DURATION' - } - }" -else - FORTIO_ARGS=(-qps "$RATE" -uniform -nocatchup -c "$CONNECTIONS") - VEGETA_ARGS=(-rate="$RATE" --workers="$CONNECTIONS" --max-workers="$MAX_CONNECTIONS") - K6_SCENARIOS="{ - constant_rate: { - executor: 'constant-arrival-rate', - rate: $RATE, - timeUnit: '1s', - duration: '$DURATION', - preAllocatedVUs: $CONNECTIONS, - maxVUs: $MAX_CONNECTIONS - } - }" -fi - -if (( RUN_FORTIO )); then - echo "===> Fortio" - # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout "$REQUEST_TIMEOUT" -json "$OUTDIR/fortio.json" "$TARGET" \ - | tee "$OUTDIR/fortio.txt" -fi - -if (( RUN_VEGETA )); then - echo - echo "===> Vegeta" - echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" -timeout="$REQUEST_TIMEOUT" \ - | tee "$OUTDIR/vegeta.bin" \ - | vegeta report | tee "$OUTDIR/vegeta.txt" - vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json" -fi - -if (( RUN_K6 )); then - echo - echo "===> k6" - cat < "$OUTDIR/k6_test.js" -import http from 'k6/http'; -import { check } from 'k6'; - -export const options = { - scenarios: $K6_SCENARIOS, - httpReq: { - timeout: '$REQUEST_TIMEOUT', - }, -}; - -export default function () { - const response = http.get('$TARGET'); - check(response, { - 'status=200': r => r.status === 200, - // you can add more if needed: - // 'status=500': r => r.status === 500, - }); -} -EOF - - k6 run --summary-export="$OUTDIR/k6_summary.json" --summary-trend-stats "min,avg,med,max,p(90),p(99)" "$OUTDIR/k6_test.js" | tee "$OUTDIR/k6.txt" -fi - -echo -echo "===> Parsing results and generating summary" - -echo -e "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus" > "$OUTDIR/summary.txt" - -if (( RUN_FORTIO )); then - FORTIO_RPS=$(jq '.ActualQPS' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_P50=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==50) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_P90=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==90) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_P99=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==99) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_STATUS=$(jq -r '.RetCodes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/fortio.json") - echo -e "Fortio\t$FORTIO_RPS\t$FORTIO_P50\t$FORTIO_P90\t$FORTIO_P99\t$FORTIO_STATUS" >> "$OUTDIR/summary.txt" -fi - -if (( RUN_VEGETA )); then - # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period - VEGETA_RPS=$(jq '.throughput' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_P50=$(jq '.latencies["50th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_P90=$(jq '.latencies["90th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_P99=$(jq '.latencies["99th"] / 
1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_STATUS=$(jq -r '.status_codes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/vegeta.json") - echo -e "Vegeta\t$VEGETA_RPS\t$VEGETA_P50\t$VEGETA_P90\t$VEGETA_P99\t$VEGETA_STATUS" >> "$OUTDIR/summary.txt" -fi - -if (( RUN_K6 )); then - K6_RPS=$(jq '.metrics.iterations.rate' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - K6_P50=$(jq '.metrics.http_req_duration.med' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - K6_P90=$(jq '.metrics.http_req_duration["p(90)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - K6_P99=$(jq '.metrics.http_req_duration["p(99)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - # Status: compute successful vs failed requests - K6_REQS_TOTAL=$(jq '.metrics.http_reqs.count' "$OUTDIR/k6_summary.json") - K6_STATUS=$(jq -r ' - .root_group.checks - | to_entries - | map(.key[7:] + "=" + (.value.passes|tostring)) - | join(",") - ' "$OUTDIR/k6_summary.json") - K6_REQS_KNOWN_STATUS=$(jq -r ' - .root_group.checks - | to_entries - | map(.value.passes) - | add - ' "$OUTDIR/k6_summary.json") - K6_REQS_OTHER=$(( K6_REQS_TOTAL - K6_REQS_KNOWN_STATUS )) - if [ "$K6_REQS_OTHER" -gt 0 ]; then - K6_STATUS="$K6_STATUS,other=$K6_REQS_OTHER" - fi - echo -e "k6\t$K6_RPS\t$K6_P50\t$K6_P90\t$K6_P99\t$K6_STATUS" >> "$OUTDIR/summary.txt" -fi - -echo -echo "Summary saved to $OUTDIR/summary.txt" -column -t -s $'\t' "$OUTDIR/summary.txt" From 64a3b717410a9fb4bb3f0b46fbca91f914c49dcc Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 18:39:59 +0000 Subject: [PATCH 16/40] Fix k6 timeout --- spec/performance/bench.rb | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 6f9a9536f4..9021b41235 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -204,13 +204,10 @@ def server_responding?(uri) export const options = { scenarios: #{k6_scenarios}, - httpReq: { - timeout: '#{REQUEST_TIMEOUT}', - }, }; export default function () { - const response = http.get('#{TARGET}'); + const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); check(response, { 'status=200': r => r.status === 200, // you can add more if needed: From de8a885329930c3df6f8c046620d158e35395695 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 18:49:21 +0000 Subject: [PATCH 17/40] Replace DURATION_SEC with DURATION --- .github/workflows/benchmark.yml | 10 +++++----- spec/performance/bench.rb | 22 ++++++++-------------- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index e2a2d9661e..388ac9c722 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -14,11 +14,11 @@ on: required: false default: 'max' type: string - duration_sec: - description: 'Duration in seconds' + duration: + description: 'Duration (e.g., "30s", "1m", "90s")' required: false - default: 30 - type: number + default: '30s' + type: string request_timeout: description: 'Request timeout (e.g., "60s", "1m", "90s")' required: false @@ -68,7 +68,7 @@ env: VEGETA_VERSION: "12.13.0" # Benchmark parameters RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} + DURATION: ${{ github.event.inputs.duration || '30s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} CONNECTIONS: ${{ github.event.inputs.connections || 10 }} 
MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || 10 }} diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 9021b41235..ac12eba048 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -15,10 +15,10 @@ # concurrent connections/virtual users CONNECTIONS = ENV.fetch("CONNECTIONS", "10").to_i # maximum connections/virtual users -MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS.to_s).to_i -DURATION_SEC = ENV.fetch("DURATION_SEC", "10").to_f -DURATION = "#{DURATION_SEC}s".freeze -# request timeout (duration string like "60s", "1m", "90s") +MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS).to_i +# benchmark duration (duration string like "30s", "1m", "90s") +DURATION = ENV.fetch("DURATION", "30s") +# request timeout (duration string as above) REQUEST_TIMEOUT = ENV.fetch("REQUEST_TIMEOUT", "60s") # Tools to run (comma-separated) TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") @@ -50,15 +50,9 @@ def validate_positive_integer(value, name) end def validate_duration(value, name) - return if value.is_a?(Numeric) && value.positive? - - raise "#{name} must be a positive number (got: '#{value}')" -end - -def validate_timeout(value) return if value.match?(/^(\d+(\.\d+)?[smh])+$/) - raise "REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '#{value}')" + raise "#{name} must be a duration like '10s', '1m', '1.5m' (got: '#{value}')" end def parse_json_file(file_path, tool_name) @@ -74,8 +68,8 @@ def parse_json_file(file_path, tool_name) validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") -validate_duration(DURATION_SEC, "DURATION_SEC") -validate_timeout(REQUEST_TIMEOUT) +validate_duration(DURATION, "DURATION") +validate_duration(REQUEST_TIMEOUT, "REQUEST_TIMEOUT") raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS @@ -93,7 +87,7 @@ def parse_json_file(file_path, tool_name) puts <<~PARAMS Benchmark parameters: - RATE: #{RATE} - - DURATION_SEC: #{DURATION_SEC} + - DURATION: #{DURATION} - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} - CONNECTIONS: #{CONNECTIONS} - MAX_CONNECTIONS: #{MAX_CONNECTIONS} From 2379a63bc10063f2a67553c0098b5f68412a208b Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 19:16:49 +0000 Subject: [PATCH 18/40] Group all code for a tool into a single block --- spec/performance/bench.rb | 225 +++++++++++++++++++------------------- 1 file changed, 115 insertions(+), 110 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index ac12eba048..0bfa77e890 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -24,14 +24,6 @@ TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") OUTDIR = "bench_results" -FORTIO_JSON = "#{OUTDIR}/fortio.json".freeze -FORTIO_TXT = "#{OUTDIR}/fortio.txt".freeze -VEGETA_BIN = "#{OUTDIR}/vegeta.bin".freeze -VEGETA_JSON = "#{OUTDIR}/vegeta.json".freeze -VEGETA_TXT = "#{OUTDIR}/vegeta.txt".freeze -K6_TEST_JS = "#{OUTDIR}/k6_test.js".freeze -K6_SUMMARY_JSON = "#{OUTDIR}/k6_summary.json".freeze -K6_TXT = "#{OUTDIR}/k6.txt".freeze SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze # Validate input parameters @@ -73,11 +65,6 @@ def parse_json_file(file_path, tool_name) raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS -# Precompute checks for each tool -run_fortio = 
TOOLS.include?("fortio") -run_vegeta = TOOLS.include?("vegeta") -run_k6 = TOOLS.include?("k6") - # Check required tools are installed required_tools = TOOLS + %w[column tee] required_tools.each do |cmd| @@ -126,103 +113,43 @@ def server_responding?(uri) FileUtils.mkdir_p(OUTDIR) -# Configure tool-specific arguments -if RATE == "max" - if CONNECTIONS != MAX_CONNECTIONS - raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" - end - - fortio_args = ["-qps", 0, "-c", CONNECTIONS] - vegeta_args = ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] - k6_scenarios = <<~JS.strip - { - max_rate: { - executor: 'constant-vus', - vus: #{CONNECTIONS}, - duration: '#{DURATION}' - } - } - JS -else - fortio_args = ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] - vegeta_args = ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] - k6_scenarios = <<~JS.strip - { - constant_rate: { - executor: 'constant-arrival-rate', - rate: #{RATE}, - timeUnit: '1s', - duration: '#{DURATION}', - preAllocatedVUs: #{CONNECTIONS}, - maxVUs: #{MAX_CONNECTIONS} - } - } - JS -end - -# Run Fortio -if run_fortio - puts "===> Fortio" - # TODO: https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio_cmd = [ - "fortio", "load", - *fortio_args, - "-t", DURATION, - "-timeout", REQUEST_TIMEOUT, - "-json", FORTIO_JSON, - TARGET - ].join(" ") - raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{FORTIO_TXT}") +# Validate RATE=max constraint +is_max_rate = RATE == "max" +if is_max_rate && CONNECTIONS != MAX_CONNECTIONS + raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" end -# Run Vegeta -if run_vegeta - puts "\n===> Vegeta" - vegeta_cmd = [ - "echo", "'GET #{TARGET}'", "|", - "vegeta", "attack", - *vegeta_args, - "-duration=#{DURATION}", - "-timeout=#{REQUEST_TIMEOUT}" - ].join(" ") - raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{VEGETA_BIN} | vegeta report | tee #{VEGETA_TXT}") - raise "Vegeta report generation failed" unless system("vegeta report -type=json #{VEGETA_BIN} > #{VEGETA_JSON}") -end - -# Run k6 -if run_k6 - puts "\n===> k6" - k6_script = <<~JS - import http from 'k6/http'; - import { check } from 'k6'; - - export const options = { - scenarios: #{k6_scenarios}, - }; - - export default function () { - const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); - check(response, { - 'status=200': r => r.status === 200, - // you can add more if needed: - // 'status=500': r => r.status === 500, - }); - } - JS - File.write(K6_TEST_JS, k6_script) - k6_command = "k6 run --summary-export=#{K6_SUMMARY_JSON} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" - raise "k6 benchmark failed" unless system("#{k6_command} #{K6_TEST_JS} | tee #{K6_TXT}") -end - -puts "\n===> Parsing results and generating summary" - # Initialize summary file File.write(SUMMARY_TXT, "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus\n") -# Parse Fortio results -if run_fortio +# Fortio +if TOOLS.include?("fortio") begin - fortio_data = parse_json_file(FORTIO_JSON, "Fortio") + puts "===> Fortio" + + fortio_json = "#{OUTDIR}/fortio.json" + fortio_txt = "#{OUTDIR}/fortio.txt" + + # Configure Fortio arguments + # See https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass + fortio_args = + if is_max_rate + ["-qps", 0, "-c", CONNECTIONS] + 
else + ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] + end + + fortio_cmd = [ + "fortio", "load", + *fortio_args, + "-t", DURATION, + "-timeout", REQUEST_TIMEOUT, + "-json", fortio_json, + TARGET + ].join(" ") + raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{fortio_txt}") + + fortio_data = parse_json_file(fortio_json, "Fortio") fortio_rps = fortio_data["ActualQPS"]&.round(2) || "missing" percentiles = fortio_data.dig("DurationHistogram", "Percentiles") || [] @@ -247,10 +174,34 @@ def server_responding?(uri) end end -# Parse Vegeta results -if run_vegeta +# Vegeta +if TOOLS.include?("vegeta") begin - vegeta_data = parse_json_file(VEGETA_JSON, "Vegeta") + puts "\n===> Vegeta" + + vegeta_bin = "#{OUTDIR}/vegeta.bin" + vegeta_json = "#{OUTDIR}/vegeta.json" + vegeta_txt = "#{OUTDIR}/vegeta.txt" + + # Configure Vegeta arguments + vegeta_args = + if is_max_rate + ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] + else + ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] + end + + vegeta_cmd = [ + "echo 'GET #{TARGET}' |", + "vegeta", "attack", + *vegeta_args, + "-duration=#{DURATION}", + "-timeout=#{REQUEST_TIMEOUT}" + ].join(" ") + raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{vegeta_bin} | vegeta report | tee #{vegeta_txt}") + raise "Vegeta report generation failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") + + vegeta_data = parse_json_file(vegeta_json, "Vegeta") # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" @@ -271,10 +222,64 @@ def server_responding?(uri) end end -# Parse k6 results -if run_k6 +# k6 +if TOOLS.include?("k6") begin - k6_data = parse_json_file(K6_SUMMARY_JSON, "k6") + puts "\n===> k6" + + k6_script_file = "#{OUTDIR}/k6_test.js" + k6_summary_json = "#{OUTDIR}/k6_summary.json" + k6_txt = "#{OUTDIR}/k6.txt" + + # Configure k6 scenarios + k6_scenarios = + if is_max_rate + <<~JS.strip + { + max_rate: { + executor: 'constant-vus', + vus: #{CONNECTIONS}, + duration: '#{DURATION}' + } + } + JS + else + <<~JS.strip + { + constant_rate: { + executor: 'constant-arrival-rate', + rate: #{RATE}, + timeUnit: '1s', + duration: '#{DURATION}', + preAllocatedVUs: #{CONNECTIONS}, + maxVUs: #{MAX_CONNECTIONS} + } + } + JS + end + + k6_script = <<~JS + import http from 'k6/http'; + import { check } from 'k6'; + + export const options = { + scenarios: #{k6_scenarios}, + }; + + export default function () { + const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); + check(response, { + 'status=200': r => r.status === 200, + // you can add more if needed: + // 'status=500': r => r.status === 500, + }); + } + JS + File.write(k6_script_file, k6_script) + k6_command = "k6 run --summary-export=#{k6_summary_json} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" + raise "k6 benchmark failed" unless system("#{k6_command} #{k6_script_file} | tee #{k6_txt}") + + k6_data = parse_json_file(k6_summary_json, "k6") k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing" k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" From 2054cfd1a42a9d7b884d74ee8adb787c32b11a3c Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 
2025 19:54:44 +0000 Subject: [PATCH 19/40] Remove duplication in adding summaries --- spec/performance/bench.rb | 58 +++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 0bfa77e890..9cec1b0d37 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -57,6 +57,16 @@ def parse_json_file(file_path, tool_name) raise "Failed to read #{tool_name} results: #{e.message}" end +def failure_metrics(error) + ["FAILED", "FAILED", "FAILED", "FAILED", error.message] +end + +def add_summary_line(*parts) + File.open(SUMMARY_TXT, "a") do |f| + f.puts parts.join("\t") + end +end + validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") @@ -120,11 +130,12 @@ def server_responding?(uri) end # Initialize summary file -File.write(SUMMARY_TXT, "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus\n") +File.write(SUMMARY_TXT, "") +add_summary_line("Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") # Fortio if TOOLS.include?("fortio") - begin + fortio_metrics = begin puts "===> Fortio" fortio_json = "#{OUTDIR}/fortio.json" @@ -162,21 +173,20 @@ def server_responding?(uri) fortio_p50 = (p50_data["Value"] * 1000).round(2) fortio_p90 = (p90_data["Value"] * 1000).round(2) fortio_p99 = (p99_data["Value"] * 1000).round(2) - fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "Fortio\t#{fortio_rps}\t#{fortio_p50}\t#{fortio_p90}\t#{fortio_p99}\t#{fortio_status}" - end + fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" + + [fortio_rps, fortio_p50, fortio_p90, fortio_p99, fortio_status] rescue StandardError => e puts "Error: #{e.message}" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "Fortio\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" - end + failure_metrics(e) end + + add_summary_line("Fortio", *fortio_metrics) end # Vegeta if TOOLS.include?("vegeta") - begin + vegeta_metrics = begin puts "\n===> Vegeta" vegeta_bin = "#{OUTDIR}/vegeta.bin" @@ -207,24 +217,20 @@ def server_responding?(uri) vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" - vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" - vegeta_line = [ - "Vegeta", vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status - ].join("\t") - File.open(SUMMARY_TXT, "a") do |f| - f.puts vegeta_line - end + vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" + + [vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status] rescue StandardError => e puts "Error: #{e.message}" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "Vegeta\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" - end + failure_metrics(e) end + + add_summary_line("Vegeta", *vegeta_metrics) end # k6 if TOOLS.include?("k6") - begin + k6_metrics = begin puts "\n===> k6" k6_script_file = "#{OUTDIR}/k6_test.js" @@ -299,15 +305,13 @@ def server_responding?(uri) k6_status_parts << "other=#{k6_reqs_other}" if k6_reqs_other.positive? k6_status = k6_status_parts.empty? ? 
"missing" : k6_status_parts.join(",") - File.open(SUMMARY_TXT, "a") do |f| - f.puts "k6\t#{k6_rps}\t#{k6_p50}\t#{k6_p90}\t#{k6_p99}\t#{k6_status}" - end + [k6_rps, k6_p50, k6_p90, k6_p99, k6_status] rescue StandardError => e puts "Error: #{e.message}" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "k6\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" - end + failure_metrics(e) end + + add_summary_line("k6", *k6_metrics) end puts "\nSummary saved to #{SUMMARY_TXT}" From 103f6d5813fc56409ec2c1f4253c52388f6b5ec2 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 12:19:03 +0000 Subject: [PATCH 20/40] Benchmark all routes --- spec/performance/bench.rb | 159 +++++++++++++++++++++++++------------- 1 file changed, 106 insertions(+), 53 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 9cec1b0d37..0323382b87 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -1,15 +1,16 @@ #!/usr/bin/env ruby # frozen_string_literal: true +require "English" require "json" require "fileutils" require "net/http" require "uri" # Benchmark parameters +PRO = ENV.fetch("PRO", "false") == "true" +APP_DIR = PRO ? "react_on_rails_pro/spec/dummy" : "spec/dummy" BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") -ROUTE = ENV.fetch("ROUTE", "server_side_hello_world_hooks") -TARGET = URI.parse("http://#{BASE_URL}/#{ROUTE}") # requests per second; if "max" will get maximum number of queries instead of a fixed rate RATE = ENV.fetch("RATE", "50") # concurrent connections/virtual users @@ -67,6 +68,30 @@ def add_summary_line(*parts) end end +# Get routes from the Rails app filtered by pages# and react_router# controllers +def get_benchmark_routes(app_dir) + routes_output = `cd #{app_dir} && bundle exec rails routes 2>&1` + raise "Failed to get routes from #{app_dir}" unless $CHILD_STATUS.success? + + routes = [] + routes_output.each_line do |line| + # Parse lines like: "server_side_hello_world GET /server_side_hello_world(.:format) pages#server_side_hello_world" + # We want GET routes only (not POST, etc.) served by pages# or react_router# controllers + # Capture path up to (.:format) part using [^(\s]+ (everything except '(' and whitespace) + next unless (match = line.match(/GET\s+([^(\s]+).*(pages|react_router)#/)) + + path = match[1] + path = "/" if path.empty? # Handle root route + routes << path + end + raise "No pages# or react_router# routes found in #{app_dir}" if routes.empty? + + routes +end + +# Get all routes to benchmark +routes = get_benchmark_routes(APP_DIR) + validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") @@ -83,6 +108,8 @@ def add_summary_line(*parts) puts <<~PARAMS Benchmark parameters: + - APP_DIR: #{APP_DIR} + - BASE_URL: #{BASE_URL} - RATE: #{RATE} - DURATION: #{DURATION} - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} @@ -104,47 +131,42 @@ def server_responding?(uri) # Wait for the server to be ready TIMEOUT_SEC = 60 +puts "Checking server availability at #{BASE_URL}..." +test_uri = URI.parse("http://#{BASE_URL}#{routes.first}") start_time = Time.now loop do - break if server_responding?(TARGET) + break if server_responding?(test_uri) - raise "Target #{TARGET} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC + raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC sleep 1 end - -# Warm up server -puts "Warming up server with 10 requests..." 
-10.times do - server_responding?(TARGET) - sleep 0.5 -end -puts "Warm-up complete" +puts "Server is ready!" FileUtils.mkdir_p(OUTDIR) # Validate RATE=max constraint -is_max_rate = RATE == "max" -if is_max_rate && CONNECTIONS != MAX_CONNECTIONS +IS_MAX_RATE = RATE == "max" +if IS_MAX_RATE && CONNECTIONS != MAX_CONNECTIONS raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" end -# Initialize summary file -File.write(SUMMARY_TXT, "") -add_summary_line("Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") +# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength -# Fortio -if TOOLS.include?("fortio") - fortio_metrics = begin - puts "===> Fortio" +# Benchmark a single route with Fortio +def run_fortio_benchmark(target, route_name) + return nil unless TOOLS.include?("fortio") - fortio_json = "#{OUTDIR}/fortio.json" - fortio_txt = "#{OUTDIR}/fortio.txt" + begin + puts "===> Fortio: #{route_name}" + + fortio_json = "#{OUTDIR}/#{route_name}_fortio.json" + fortio_txt = "#{OUTDIR}/#{route_name}_fortio.txt" # Configure Fortio arguments # See https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass fortio_args = - if is_max_rate + if IS_MAX_RATE ["-qps", 0, "-c", CONNECTIONS] else ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] @@ -156,7 +178,7 @@ def server_responding?(uri) "-t", DURATION, "-timeout", REQUEST_TIMEOUT, "-json", fortio_json, - TARGET + target ].join(" ") raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{fortio_txt}") @@ -180,29 +202,29 @@ def server_responding?(uri) puts "Error: #{e.message}" failure_metrics(e) end - - add_summary_line("Fortio", *fortio_metrics) end -# Vegeta -if TOOLS.include?("vegeta") - vegeta_metrics = begin - puts "\n===> Vegeta" +# Benchmark a single route with Vegeta +def run_vegeta_benchmark(target, route_name) + return nil unless TOOLS.include?("vegeta") + + begin + puts "\n===> Vegeta: #{route_name}" - vegeta_bin = "#{OUTDIR}/vegeta.bin" - vegeta_json = "#{OUTDIR}/vegeta.json" - vegeta_txt = "#{OUTDIR}/vegeta.txt" + vegeta_bin = "#{OUTDIR}/#{route_name}_vegeta.bin" + vegeta_json = "#{OUTDIR}/#{route_name}_vegeta.json" + vegeta_txt = "#{OUTDIR}/#{route_name}_vegeta.txt" # Configure Vegeta arguments vegeta_args = - if is_max_rate + if IS_MAX_RATE ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] else ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] end vegeta_cmd = [ - "echo 'GET #{TARGET}' |", + "echo 'GET #{target}' |", "vegeta", "attack", *vegeta_args, "-duration=#{DURATION}", @@ -212,7 +234,6 @@ def server_responding?(uri) raise "Vegeta report generation failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") vegeta_data = parse_json_file(vegeta_json, "Vegeta") - # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" @@ -224,22 +245,22 @@ def server_responding?(uri) puts "Error: #{e.message}" failure_metrics(e) end - - add_summary_line("Vegeta", *vegeta_metrics) end -# k6 -if TOOLS.include?("k6") - k6_metrics = begin - puts "\n===> k6" +# Benchmark a single route with k6 +def run_k6_benchmark(target, route_name) + 
return nil unless TOOLS.include?("k6") + + begin + puts "\n===> k6: #{route_name}" - k6_script_file = "#{OUTDIR}/k6_test.js" - k6_summary_json = "#{OUTDIR}/k6_summary.json" - k6_txt = "#{OUTDIR}/k6.txt" + k6_script_file = "#{OUTDIR}/#{route_name}_k6_test.js" + k6_summary_json = "#{OUTDIR}/#{route_name}_k6_summary.json" + k6_txt = "#{OUTDIR}/#{route_name}_k6.txt" # Configure k6 scenarios k6_scenarios = - if is_max_rate + if IS_MAX_RATE <<~JS.strip { max_rate: { @@ -273,11 +294,9 @@ def server_responding?(uri) }; export default function () { - const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); + const response = http.get('#{target}', { timeout: '#{REQUEST_TIMEOUT}' }); check(response, { 'status=200': r => r.status === 200, - // you can add more if needed: - // 'status=500': r => r.status === 500, }); } JS @@ -294,8 +313,6 @@ def server_responding?(uri) # Status: compute successful vs failed requests k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 k6_checks = k6_data.dig("root_group", "checks") || {} - # Extract status code from check name (e.g., "status=200" -> "200") - # Handle both "status=XXX" format and other potential formats k6_status_parts = k6_checks.map do |name, check| status_label = name.start_with?("status=") ? name.delete_prefix("status=") : name "#{status_label}=#{check['passes']}" @@ -310,8 +327,44 @@ def server_responding?(uri) puts "Error: #{e.message}" failure_metrics(e) end +end + +# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength + +# Initialize summary file +File.write(SUMMARY_TXT, "") +add_summary_line("Route", "Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") + +# Run benchmarks for each route +routes.each do |route| + separator = "=" * 80 + puts "\n#{separator}" + puts "Benchmarking route: #{route}" + puts separator + + target = URI.parse("http://#{BASE_URL}#{route}") + + # Warm up server for this route + puts "Warming up server for #{route} with 10 requests..." + 10.times do + server_responding?(target) + sleep 0.5 + end + puts "Warm-up complete for #{route}" + + # Sanitize route name for filenames + route_name = route.gsub(%r{^/}, "").tr("/", "_") + route_name = "root" if route_name.empty? + + # Run each benchmark tool + fortio_metrics = run_fortio_benchmark(target, route_name) + add_summary_line(route, "Fortio", *fortio_metrics) if fortio_metrics + + vegeta_metrics = run_vegeta_benchmark(target, route_name) + add_summary_line(route, "Vegeta", *vegeta_metrics) if vegeta_metrics - add_summary_line("k6", *k6_metrics) + k6_metrics = run_k6_benchmark(target, route_name) + add_summary_line(route, "k6", *k6_metrics) if k6_metrics end puts "\nSummary saved to #{SUMMARY_TXT}" From dc96f80f0f433b23c8bf60bf4e34f20e5a6f8891 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 12:26:49 +0000 Subject: [PATCH 21/40] Fix Fortio failure on server_side_log_throw_raise --- spec/performance/bench.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 0323382b87..9fe00a728d 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -177,6 +177,8 @@ def run_fortio_benchmark(target, route_name) *fortio_args, "-t", DURATION, "-timeout", REQUEST_TIMEOUT, + # Allow redirects. Could use -L instead, but it uses the slower HTTP client. 
+ "-allow-initial-errors", "-json", fortio_json, target ].join(" ") From e14fc93dad44f4ef9a3030a0d0720ff1a69b65db Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 12:48:49 +0000 Subject: [PATCH 22/40] Allow specifying routes --- .github/workflows/benchmark.yml | 5 +++++ spec/performance/bench.rb | 9 ++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 388ac9c722..2fe7698aa2 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -9,6 +9,10 @@ on: required: false default: false type: boolean + routes: + description: 'Comma-separated routes to benchmark (e.g., "/,/hello"). Leave empty to auto-detect from Rails.' + required: false + type: string rate: description: 'Requests per second (use "max" for maximum throughput)' required: false @@ -67,6 +71,7 @@ env: K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" # Benchmark parameters + ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} DURATION: ${{ github.event.inputs.duration || '30s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 9fe00a728d..eb005c9212 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -10,6 +10,7 @@ # Benchmark parameters PRO = ENV.fetch("PRO", "false") == "true" APP_DIR = PRO ? "react_on_rails_pro/spec/dummy" : "spec/dummy" +ROUTES = ENV.fetch("ROUTES", nil) BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") # requests per second; if "max" will get maximum number of queries instead of a fixed rate RATE = ENV.fetch("RATE", "50") @@ -90,7 +91,12 @@ def get_benchmark_routes(app_dir) end # Get all routes to benchmark -routes = get_benchmark_routes(APP_DIR) +routes = + if ROUTES + ROUTES.split(",").map(&:strip) + else + get_benchmark_routes(APP_DIR) + end validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") @@ -109,6 +115,7 @@ def get_benchmark_routes(app_dir) puts <<~PARAMS Benchmark parameters: - APP_DIR: #{APP_DIR} + - ROUTES: #{ROUTES || 'auto-detect from Rails'} - BASE_URL: #{BASE_URL} - RATE: #{RATE} - DURATION: #{DURATION} From 40c72cedeb3d8b7240c488bbdad43eddd7dfa60e Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 14:10:20 +0000 Subject: [PATCH 23/40] Add pro benchmarks --- .github/workflows/benchmark.yml | 220 +++++++++++++++--- react_on_rails_pro/spec/dummy/Procfile.prod | 6 + react_on_rails_pro/spec/dummy/bin/prod | 40 ++++ react_on_rails_pro/spec/dummy/bin/prod-assets | 8 + .../dummy/config/environments/production.rb | 3 + 5 files changed, 241 insertions(+), 36 deletions(-) create mode 100644 react_on_rails_pro/spec/dummy/Procfile.prod create mode 100755 react_on_rails_pro/spec/dummy/bin/prod create mode 100755 react_on_rails_pro/spec/dummy/bin/prod-assets diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 2fe7698aa2..d6d5d2c7d5 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -29,33 +29,34 @@ on: default: '60s' type: string connections: - description: 'Concurrent connections/virtual users' + description: 'Concurrent connections/virtual users (also used as max)' required: false default: 10 type: number - max_connections: - description: 'Maximum connections/virtual users' - required: false - type: number web_concurrency: description: 'Number of Puma worker processes' required: false default: 4 type: number - 
rails_max_threads: - description: 'Maximum number of Puma threads' + rails_threads: + description: 'Number of Puma threads (min and max will be same)' required: false default: 3 type: number - rails_min_threads: - description: 'Minimum number of Puma threads (same as maximum if not set)' - required: false - type: number tools: description: 'Comma-separated list of tools to run' required: false default: 'fortio,vegeta,k6' type: string + app_version: + description: 'Which app version to benchmark' + required: false + default: 'both' + type: choice + options: + - 'both' + - 'core_only' + - 'pro_only' push: branches: - master @@ -76,15 +77,17 @@ env: DURATION: ${{ github.event.inputs.duration || '30s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} CONNECTIONS: ${{ github.event.inputs.connections || 10 }} - MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || 10 }} + MAX_CONNECTIONS: ${{ github.event.inputs.connections || 10 }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} - RAILS_MAX_THREADS: ${{ github.event.inputs.rails_max_threads || 3 }} - RAILS_MIN_THREADS: ${{ github.event.inputs.rails_min_threads || github.event.inputs.rails_max_threads || 3 }} + RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} + RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: benchmark: runs-on: ubuntu-latest + env: + REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE }} steps: # ============================================ @@ -181,8 +184,8 @@ jobs: - name: Setup Ruby uses: ruby/setup-ruby@v1 with: - ruby-version: '3.4' - bundler: 2.5.9 + ruby-version: '3.3.7' + bundler: 2.5.4 - name: Cache foreman gem id: cache-foreman @@ -224,29 +227,34 @@ jobs: run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish - name: yalc add react-on-rails + if: github.event.inputs.app_version != 'pro_only' run: cd spec/dummy && yalc add react-on-rails - name: Install Node modules with Yarn for dummy app + if: github.event.inputs.app_version != 'pro_only' run: cd spec/dummy && yarn install --no-progress --no-emoji - name: Save dummy app ruby gems to cache + if: github.event.inputs.app_version != 'pro_only' uses: actions/cache@v4 with: path: spec/dummy/vendor/bundle key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} - name: Install Ruby Gems for dummy app + if: github.event.inputs.app_version != 'pro_only' run: | cd spec/dummy bundle lock --add-platform 'x86_64-linux' if ! bundle check --path=vendor/bundle; then - bundle _2.5.9_ install --path=vendor/bundle --jobs=4 --retry=3 + bundle _2.5.4_ install --path=vendor/bundle --jobs=4 --retry=3 fi - name: generate file system-based packs run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs - name: Prepare production assets + if: github.event.inputs.app_version != 'pro_only' run: | set -e # Exit on any error echo "🔨 Building production assets..." @@ -260,6 +268,7 @@ jobs: echo "✅ Production assets built successfully" - name: Start production server + if: github.event.inputs.app_version != 'pro_only' run: | set -e # Exit on any error echo "🚀 Starting production server..." 
@@ -284,14 +293,15 @@ jobs: exit 1 # ============================================ - # STEP 5: RUN BENCHMARKS + # STEP 5: RUN CORE BENCHMARKS # ============================================ - - name: Execute benchmark suite - timeout-minutes: 20 + - name: Execute Core benchmark suite + if: github.event.inputs.app_version != 'pro_only' + timeout-minutes: 120 run: | set -e # Exit on any error - echo "🏃 Running benchmark suite..." + echo "🏃 Running Core benchmark suite..." if ! ruby spec/performance/bench.rb; then echo "❌ ERROR: Benchmark execution failed" @@ -300,7 +310,8 @@ jobs: echo "✅ Benchmark suite completed successfully" - - name: Validate benchmark results + - name: Validate Core benchmark results + if: github.event.inputs.app_version != 'pro_only' run: | set -e # Exit on any error echo "🔍 Validating benchmark output files..." @@ -338,39 +349,176 @@ jobs: echo "Continuing with available results..." fi - # ============================================ - # STEP 6: COLLECT BENCHMARK RESULTS - # ============================================ - - - name: Upload benchmark results + - name: Upload Core benchmark results uses: actions/upload-artifact@v4 - if: always() # Upload even if benchmark fails + if: github.event.inputs.app_version != 'pro_only' && always() with: - name: benchmark-results-${{ github.run_number }} + name: benchmark-core-results-${{ github.run_number }} path: bench_results/ retention-days: 30 if-no-files-found: warn - - name: Verify artifact upload - if: success() + # ============================================ + # STEP 6: SETUP PRO APPLICATION SERVER + # ============================================ + - name: Cache Pro package node modules + if: github.event.inputs.app_version != 'core_only' + uses: actions/cache@v4 + with: + path: react_on_rails_pro/node_modules + key: v4-pro-package-node-modules-cache-${{ hashFiles('react_on_rails_pro/yarn.lock') }} + + - name: Cache Pro dummy app node modules + if: github.event.inputs.app_version != 'core_only' + uses: actions/cache@v4 + with: + path: react_on_rails_pro/spec/dummy/node_modules + key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/yarn.lock') }} + + - name: Cache Pro dummy app Ruby gems + if: github.event.inputs.app_version != 'core_only' + uses: actions/cache@v4 + with: + path: react_on_rails_pro/spec/dummy/vendor/bundle + key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} + + - name: Install Node modules with Yarn for Pro package + if: github.event.inputs.app_version != 'core_only' + run: | + cd react_on_rails_pro + sudo yarn global add yalc + yarn install --frozen-lockfile --no-progress --no-emoji + + - name: Install Node modules with Yarn for Pro dummy app + if: github.event.inputs.app_version != 'core_only' + run: cd react_on_rails_pro/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji + + - name: Install Ruby Gems for Pro dummy app + if: github.event.inputs.app_version != 'core_only' + run: | + cd react_on_rails_pro/spec/dummy + bundle lock --add-platform 'x86_64-linux' + bundle config set path vendor/bundle + bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 + + - name: Generate file-system based entrypoints for Pro + if: github.event.inputs.app_version != 'core_only' + run: cd react_on_rails_pro/spec/dummy && bundle exec rake react_on_rails:generate_packs + + - name: Prepare Pro production assets + if: github.event.inputs.app_version != 'core_only' run: | - echo "✅ Benchmark results uploaded as workflow 
artifacts" - echo "📦 Artifact name: benchmark-results-${{ github.run_number }}" - echo "🔗 Access artifacts from the Actions tab in GitHub" + set -e + echo "🔨 Building Pro production assets..." + cd react_on_rails_pro/spec/dummy + + if ! bin/prod-assets; then + echo "❌ ERROR: Failed to build production assets" + exit 1 + fi + + echo "✅ Production assets built successfully" + + - name: Start Pro production server + if: github.event.inputs.app_version != 'core_only' + run: | + set -e + echo "🚀 Starting Pro production server..." + cd react_on_rails_pro/spec/dummy + + # Start server in background + bin/prod & + echo "Server started in background" + + # Wait for server to be ready (max 30 seconds) + echo "⏳ Waiting for server to be ready..." + for i in {1..30}; do + if curl -fsS http://localhost:3001 > /dev/null; then + echo "✅ Server is ready and responding" + exit 0 + fi + echo " Attempt $i/30: Server not ready yet..." + sleep 1 + done + + echo "❌ ERROR: Server failed to start within 30 seconds" + exit 1 # ============================================ - # WORKFLOW COMPLETION + # STEP 7: RUN PRO BENCHMARKS # ============================================ + - name: Execute Pro benchmark suite + if: github.event.inputs.app_version != 'core_only' + timeout-minutes: 120 + run: | + set -e + echo "🏃 Running Pro benchmark suite..." + + if ! PRO=true ruby spec/performance/bench.rb; then + echo "❌ ERROR: Benchmark execution failed" + exit 1 + fi + + echo "✅ Benchmark suite completed successfully" + + - name: Validate Pro benchmark results + if: github.event.inputs.app_version != 'core_only' + run: | + set -e + echo "🔍 Validating Pro benchmark output files..." + + RESULTS_DIR="bench_results" + REQUIRED_FILES=("summary.txt") + MISSING_FILES=() + + if [ ! -d "${RESULTS_DIR}" ]; then + echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + exit 1 + fi + + echo "Generated files:" + ls -lh ${RESULTS_DIR}/ || true + echo "" + + for file in "${REQUIRED_FILES[@]}"; do + if [ ! -f "${RESULTS_DIR}/${file}" ]; then + MISSING_FILES+=("${file}") + fi + done + + if [ ${#MISSING_FILES[@]} -eq 0 ]; then + echo "✅ All required benchmark output files present" + echo "📊 Summary preview:" + head -20 ${RESULTS_DIR}/summary.txt || true + else + echo "⚠️ WARNING: Some required files are missing:" + printf ' - %s\n' "${MISSING_FILES[@]}" + echo "Continuing with available results..." 
+ fi + + - name: Upload Pro benchmark results + uses: actions/upload-artifact@v4 + if: github.event.inputs.app_version != 'core_only' && always() + with: + name: benchmark-pro-results-${{ github.run_number }} + path: bench_results/ + retention-days: 30 + if-no-files-found: warn + + # ============================================ + # STEP 8: WORKFLOW COMPLETION + # ============================================ - name: Workflow summary if: always() run: | echo "📋 Benchmark Workflow Summary" - echo "==============================" + echo "====================================" echo "Status: ${{ job.status }}" echo "Run number: ${{ github.run_number }}" echo "Triggered by: ${{ github.actor }}" echo "Branch: ${{ github.ref_name }}" + echo "App version: ${{ github.event.inputs.app_version || 'both' }}" echo "" if [ "${{ job.status }}" == "success" ]; then echo "✅ All steps completed successfully" diff --git a/react_on_rails_pro/spec/dummy/Procfile.prod b/react_on_rails_pro/spec/dummy/Procfile.prod new file mode 100644 index 0000000000..d47e98ef15 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/Procfile.prod @@ -0,0 +1,6 @@ +# Procfile for production mode (precompiled assets) + +rails: RAILS_ENV=production NODE_ENV=production bin/rails s -p 3001 + +# Start Node server for server rendering. +node-renderer: NODE_ENV=production RENDERER_LOG_LEVEL=error RENDERER_PORT=3800 node client/node-renderer.js diff --git a/react_on_rails_pro/spec/dummy/bin/prod b/react_on_rails_pro/spec/dummy/bin/prod new file mode 100755 index 0000000000..647a9c2d96 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/bin/prod @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# Run only after ./prod-assets + +# Check if assets are precompiled +MANIFEST="public/webpack/production/manifest.json" + +if [ ! -d "public/assets" ]; then + echo "ERROR: public/assets not found. Run ./bin/prod-assets first" + exit 1 +fi + +if [ ! -f "$MANIFEST" ]; then + echo "ERROR: $MANIFEST not found. Run ./bin/prod-assets first" + exit 1 +fi + +# Simple up-to-date check: warn if source files are newer than manifest.json +if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then + echo "WARNING: client or config has changes newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then + echo "WARNING: yarn.lock is newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +export NODE_ENV=production +export RAILS_ENV=production + +if command -v overmind &> /dev/null; then + overmind start -f Procfile.prod +elif command -v foreman &> /dev/null; then + foreman start -f Procfile.prod +else + echo "Installing foreman..." 
+ gem install foreman + foreman start -f Procfile.prod +fi diff --git a/react_on_rails_pro/spec/dummy/bin/prod-assets b/react_on_rails_pro/spec/dummy/bin/prod-assets new file mode 100755 index 0000000000..96be6c50e8 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/bin/prod-assets @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +export NODE_ENV=production +export RAILS_ENV=production +if [ "$CI" = "true" ]; then + bundle exec bootsnap precompile --gemfile app/ lib/ config/ +fi +bundle exec rails assets:precompile diff --git a/react_on_rails_pro/spec/dummy/config/environments/production.rb b/react_on_rails_pro/spec/dummy/config/environments/production.rb index 519aa382d6..1c8daba3b2 100644 --- a/react_on_rails_pro/spec/dummy/config/environments/production.rb +++ b/react_on_rails_pro/spec/dummy/config/environments/production.rb @@ -3,6 +3,9 @@ Rails.application.configure do # Settings specified here will take precedence over those in config/application.rb. + # Use a hardcoded secret for this test/dummy app (not for real production use) + config.secret_key_base = ENV.fetch("SECRET_KEY_BASE", "dummy-secret-key-base-for-testing-only") + # Code is not reloaded between requests. config.cache_classes = true From a362d9152d5da7f69d0a0f7940417ffcee6b6a52 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 14:11:11 +0000 Subject: [PATCH 24/40] Update Claude instructions --- CLAUDE.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CLAUDE.md b/CLAUDE.md index af69392b38..7d36ea5aef 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -143,6 +143,8 @@ Pre-commit hooks automatically run: - All linters: `rake lint` (runs ESLint and RuboCop) - ESLint only: `pnpm run lint` or `rake lint:eslint` - RuboCop only: `rake lint:rubocop` + - GitHub Action files (workflows, reusable actions, etc.): `actionlint` + - YAML files: `yamllint` (or validate the syntax with Ruby if it isn't installed). Do _not_ try to run RuboCop on `.yml` files. 
- **Code Formatting**: - Format code with Prettier: `rake autofix` - Check formatting without fixing: `pnpm run format.listDifferent` From ab23c25f142e43a1f86f7e4e63bd04379aa7bde5 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 19:48:24 +0000 Subject: [PATCH 25/40] FIXME temp commit --- .github/workflows/benchmark.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index d6d5d2c7d5..fdd6809442 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -51,7 +51,8 @@ on: app_version: description: 'Which app version to benchmark' required: false - default: 'both' + # FIXME: for debugging, restore 'both' before merging + default: 'pro_only' type: choice options: - 'both' From d38d314ebaf84e2520ef72526499c0ec6b5e75f0 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 20:08:30 +0000 Subject: [PATCH 26/40] Update prod-assets to include generate_packs --- .github/workflows/benchmark.yml | 11 ++++------- react_on_rails_pro/spec/dummy/bin/prod-assets | 1 + 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index fdd6809442..87c71cc7c9 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -88,7 +88,8 @@ jobs: benchmark: runs-on: ubuntu-latest env: - REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE }} + SECRET_KEY_BASE: 'dummy-secret-key-for-ci-testing-not-used-in-production' + REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE_V2 }} steps: # ============================================ @@ -247,12 +248,8 @@ jobs: run: | cd spec/dummy bundle lock --add-platform 'x86_64-linux' - if ! bundle check --path=vendor/bundle; then - bundle _2.5.4_ install --path=vendor/bundle --jobs=4 --retry=3 - fi - - - name: generate file system-based packs - run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs + bundle config set path vendor/bundle + bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 - name: Prepare production assets if: github.event.inputs.app_version != 'pro_only' diff --git a/react_on_rails_pro/spec/dummy/bin/prod-assets b/react_on_rails_pro/spec/dummy/bin/prod-assets index 96be6c50e8..828b1e6ae8 100755 --- a/react_on_rails_pro/spec/dummy/bin/prod-assets +++ b/react_on_rails_pro/spec/dummy/bin/prod-assets @@ -5,4 +5,5 @@ export RAILS_ENV=production if [ "$CI" = "true" ]; then bundle exec bootsnap precompile --gemfile app/ lib/ config/ fi +bundle exec rails react_on_rails:generate_packs bundle exec rails assets:precompile From 54ec3265228d1df42c692e23fae6ee2e65e2d050 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 20:34:07 +0000 Subject: [PATCH 27/40] Disable js_compressor and css_compressor --- react_on_rails_pro/Gemfile.development_dependencies | 1 - react_on_rails_pro/Gemfile.lock | 3 --- react_on_rails_pro/spec/dummy/Gemfile.lock | 3 --- .../spec/dummy/config/environments/production.rb | 5 +++-- 4 files changed, 3 insertions(+), 9 deletions(-) diff --git a/react_on_rails_pro/Gemfile.development_dependencies b/react_on_rails_pro/Gemfile.development_dependencies index 926c07ec3a..fdb65a9b5e 100644 --- a/react_on_rails_pro/Gemfile.development_dependencies +++ b/react_on_rails_pro/Gemfile.development_dependencies @@ -20,7 +20,6 @@ gem "pg" # Turbolinks makes following links in your web application faster. 
Read more: https://github.com/rails/turbolinks gem "turbolinks" gem "sqlite3", "~> 1.4" -gem "uglifier" gem "jquery-rails" gem "sprockets" gem "sass-rails" diff --git a/react_on_rails_pro/Gemfile.lock b/react_on_rails_pro/Gemfile.lock index a3ee4fe390..ea8d6735f8 100644 --- a/react_on_rails_pro/Gemfile.lock +++ b/react_on_rails_pro/Gemfile.lock @@ -434,8 +434,6 @@ GEM turbolinks-source (5.2.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - uglifier (4.2.0) - execjs (>= 0.3.0, < 3) unicode-display_width (2.5.0) uri (1.0.3) useragent (0.16.11) @@ -514,7 +512,6 @@ DEPENDENCIES sprockets sqlite3 (~> 1.4) turbolinks - uglifier web-console webdrivers (= 5.3.0) webmock diff --git a/react_on_rails_pro/spec/dummy/Gemfile.lock b/react_on_rails_pro/spec/dummy/Gemfile.lock index 30b28cd92f..6bae5538db 100644 --- a/react_on_rails_pro/spec/dummy/Gemfile.lock +++ b/react_on_rails_pro/spec/dummy/Gemfile.lock @@ -470,8 +470,6 @@ GEM turbolinks-source (5.2.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - uglifier (4.2.0) - execjs (>= 0.3.0, < 3) unicode-display_width (2.5.0) uri (1.0.3) useragent (0.16.11) @@ -563,7 +561,6 @@ DEPENDENCIES sprockets sqlite3 (~> 1.4) turbolinks - uglifier web-console webdrivers (= 5.3.0) webmock diff --git a/react_on_rails_pro/spec/dummy/config/environments/production.rb b/react_on_rails_pro/spec/dummy/config/environments/production.rb index 1c8daba3b2..330291b114 100644 --- a/react_on_rails_pro/spec/dummy/config/environments/production.rb +++ b/react_on_rails_pro/spec/dummy/config/environments/production.rb @@ -22,8 +22,9 @@ config.public_file_server.enabled = true # Compress JavaScripts and CSS. - config.assets.js_compressor = Uglifier.new(harmony: true) - config.assets.css_compressor = :csso + # JS/CSS compression handled by Webpack/Shakapacker, not needed for Sprockets + # config.assets.js_compressor = Uglifier.new(harmony: true) + # config.assets.css_compressor = :csso # Do not fallback to assets pipeline if a precompiled asset is missed. config.assets.compile = false From 8aa38e2433e87cb9de0b77c62c7c739b53883074 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 20:51:45 +0000 Subject: [PATCH 28/40] Remove unused pg gem --- react_on_rails_pro/Gemfile.development_dependencies | 2 -- react_on_rails_pro/Gemfile.lock | 2 -- react_on_rails_pro/spec/dummy/Gemfile.lock | 2 -- 3 files changed, 6 deletions(-) diff --git a/react_on_rails_pro/Gemfile.development_dependencies b/react_on_rails_pro/Gemfile.development_dependencies index fdb65a9b5e..eaa2658d9c 100644 --- a/react_on_rails_pro/Gemfile.development_dependencies +++ b/react_on_rails_pro/Gemfile.development_dependencies @@ -15,8 +15,6 @@ gem "puma", "~> 6" # Build JSON APIs with ease. Read more: https://github.com/rails/jbuilder gem "jbuilder" -gem "pg" - # Turbolinks makes following links in your web application faster. Read more: https://github.com/rails/turbolinks gem "turbolinks" gem "sqlite3", "~> 1.4" diff --git a/react_on_rails_pro/Gemfile.lock b/react_on_rails_pro/Gemfile.lock index ea8d6735f8..b613a655a4 100644 --- a/react_on_rails_pro/Gemfile.lock +++ b/react_on_rails_pro/Gemfile.lock @@ -250,7 +250,6 @@ GEM parser (3.3.3.0) ast (~> 2.4.1) racc - pg (1.5.6) pp (0.6.2) prettyprint prettyprint (0.2.0) @@ -486,7 +485,6 @@ DEPENDENCIES net-http net-imap net-smtp - pg pry (>= 0.14.1) pry-byebug! 
pry-doc diff --git a/react_on_rails_pro/spec/dummy/Gemfile.lock b/react_on_rails_pro/spec/dummy/Gemfile.lock index 6bae5538db..644e11dd46 100644 --- a/react_on_rails_pro/spec/dummy/Gemfile.lock +++ b/react_on_rails_pro/spec/dummy/Gemfile.lock @@ -275,7 +275,6 @@ GEM parser (3.3.3.0) ast (~> 2.4.1) racc - pg (1.5.6) pp (0.6.2) prettyprint prettyprint (0.2.0) @@ -533,7 +532,6 @@ DEPENDENCIES net-http net-imap net-smtp - pg prism-rails pry (>= 0.14.1) pry-byebug! From 841432222a8efe855555676e82582330a0a75383 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 21:22:11 +0000 Subject: [PATCH 29/40] Handle empty inputs correctly --- .github/workflows/benchmark.yml | 12 ++++++------ spec/performance/bench.rb | 27 ++++++++++++++++++--------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 87c71cc7c9..78923385d0 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -72,17 +72,17 @@ env: FORTIO_VERSION: "1.73.0" K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" - # Benchmark parameters + # Benchmark parameters (defaults in bench.rb unless overridden here for CI) ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION: ${{ github.event.inputs.duration || '30s' }} - REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} - CONNECTIONS: ${{ github.event.inputs.connections || 10 }} - MAX_CONNECTIONS: ${{ github.event.inputs.connections || 10 }} + DURATION: ${{ github.event.inputs.duration }} + REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} + CONNECTIONS: ${{ github.event.inputs.connections }} + MAX_CONNECTIONS: ${{ github.event.inputs.connections }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} + TOOLS: ${{ github.event.inputs.tools }} jobs: benchmark: diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index eb005c9212..81b797f2c3 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -7,23 +7,30 @@ require "net/http" require "uri" +# Helper to get env var with default, +# treating empty string and "0" as unset since they can come from the benchmark workflow. +def env_or_default(key, default) + value = ENV[key].to_s + value.empty? || value == "0" ? default : value +end + # Benchmark parameters PRO = ENV.fetch("PRO", "false") == "true" APP_DIR = PRO ? 
"react_on_rails_pro/spec/dummy" : "spec/dummy" -ROUTES = ENV.fetch("ROUTES", nil) -BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") +ROUTES = env_or_default("ROUTES", nil) +BASE_URL = env_or_default("BASE_URL", "localhost:3001") # requests per second; if "max" will get maximum number of queries instead of a fixed rate -RATE = ENV.fetch("RATE", "50") +RATE = env_or_default("RATE", "50") # concurrent connections/virtual users -CONNECTIONS = ENV.fetch("CONNECTIONS", "10").to_i +CONNECTIONS = env_or_default("CONNECTIONS", 10).to_i # maximum connections/virtual users -MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS).to_i +MAX_CONNECTIONS = env_or_default("MAX_CONNECTIONS", CONNECTIONS).to_i # benchmark duration (duration string like "30s", "1m", "90s") -DURATION = ENV.fetch("DURATION", "30s") +DURATION = env_or_default("DURATION", "30s") # request timeout (duration string as above) -REQUEST_TIMEOUT = ENV.fetch("REQUEST_TIMEOUT", "60s") +REQUEST_TIMEOUT = env_or_default("REQUEST_TIMEOUT", "60s") # Tools to run (comma-separated) -TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") +TOOLS = env_or_default("TOOLS", "fortio,vegeta,k6").split(",") OUTDIR = "bench_results" SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze @@ -93,11 +100,13 @@ def get_benchmark_routes(app_dir) # Get all routes to benchmark routes = if ROUTES - ROUTES.split(",").map(&:strip) + ROUTES.split(",").map(&:strip).reject(&:empty?) else get_benchmark_routes(APP_DIR) end +raise "No routes to benchmark" if routes.empty? + validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") From 34fd9fbd94f527016ce69f50fc284073c9cb18cb Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 21:32:51 +0000 Subject: [PATCH 30/40] Fix app version handling in the benchmark workflow --- .github/workflows/benchmark.yml | 58 ++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 78923385d0..b3e7bb6cb5 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -72,6 +72,9 @@ env: FORTIO_VERSION: "1.73.0" K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" + # Determine which apps to run (default is 'pro_only' for all triggers) + RUN_CORE: ${{ (github.event.inputs.app_version || 'pro_only') != 'pro_only' && 'true' || '' }} + RUN_PRO: ${{ (github.event.inputs.app_version || 'pro_only') != 'core_only' && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} @@ -229,30 +232,30 @@ jobs: run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish - name: yalc add react-on-rails - if: github.event.inputs.app_version != 'pro_only' + if: env.RUN_CORE run: cd spec/dummy && yalc add react-on-rails - - name: Install Node modules with Yarn for dummy app - if: github.event.inputs.app_version != 'pro_only' + - name: Install Node modules with Yarn for Core dummy app + if: env.RUN_CORE run: cd spec/dummy && yarn install --no-progress --no-emoji - - name: Save dummy app ruby gems to cache - if: github.event.inputs.app_version != 'pro_only' + - name: Save Core dummy app ruby gems to cache + if: env.RUN_CORE uses: actions/cache@v4 with: path: spec/dummy/vendor/bundle key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} - - name: Install Ruby Gems 
for dummy app - if: github.event.inputs.app_version != 'pro_only' + - name: Install Ruby Gems for Core dummy app + if: env.RUN_CORE run: | cd spec/dummy bundle lock --add-platform 'x86_64-linux' bundle config set path vendor/bundle bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 - - name: Prepare production assets - if: github.event.inputs.app_version != 'pro_only' + - name: Prepare Core production assets + if: env.RUN_CORE run: | set -e # Exit on any error echo "🔨 Building production assets..." @@ -265,8 +268,8 @@ jobs: echo "✅ Production assets built successfully" - - name: Start production server - if: github.event.inputs.app_version != 'pro_only' + - name: Start Core production server + if: env.RUN_CORE run: | set -e # Exit on any error echo "🚀 Starting production server..." @@ -295,7 +298,7 @@ jobs: # ============================================ - name: Execute Core benchmark suite - if: github.event.inputs.app_version != 'pro_only' + if: env.RUN_CORE timeout-minutes: 120 run: | set -e # Exit on any error @@ -309,7 +312,7 @@ jobs: echo "✅ Benchmark suite completed successfully" - name: Validate Core benchmark results - if: github.event.inputs.app_version != 'pro_only' + if: env.RUN_CORE run: | set -e # Exit on any error echo "🔍 Validating benchmark output files..." @@ -349,7 +352,7 @@ jobs: - name: Upload Core benchmark results uses: actions/upload-artifact@v4 - if: github.event.inputs.app_version != 'pro_only' && always() + if: env.RUN_CORE && always() with: name: benchmark-core-results-${{ github.run_number }} path: bench_results/ @@ -360,39 +363,39 @@ jobs: # STEP 6: SETUP PRO APPLICATION SERVER # ============================================ - name: Cache Pro package node modules - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/node_modules key: v4-pro-package-node-modules-cache-${{ hashFiles('react_on_rails_pro/yarn.lock') }} - name: Cache Pro dummy app node modules - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/spec/dummy/node_modules key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/yarn.lock') }} - name: Cache Pro dummy app Ruby gems - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/spec/dummy/vendor/bundle key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} - name: Install Node modules with Yarn for Pro package - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | cd react_on_rails_pro sudo yarn global add yalc yarn install --frozen-lockfile --no-progress --no-emoji - name: Install Node modules with Yarn for Pro dummy app - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: cd react_on_rails_pro/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji - name: Install Ruby Gems for Pro dummy app - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | cd react_on_rails_pro/spec/dummy bundle lock --add-platform 'x86_64-linux' @@ -400,11 +403,11 @@ jobs: bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 - name: Generate file-system based entrypoints for Pro - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: cd react_on_rails_pro/spec/dummy && bundle exec rake react_on_rails:generate_packs - name: Prepare Pro production assets - if: 
github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | set -e echo "🔨 Building Pro production assets..." @@ -418,7 +421,7 @@ jobs: echo "✅ Production assets built successfully" - name: Start Pro production server - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | set -e echo "🚀 Starting Pro production server..." @@ -447,7 +450,7 @@ jobs: # ============================================ - name: Execute Pro benchmark suite - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO timeout-minutes: 120 run: | set -e @@ -461,7 +464,7 @@ jobs: echo "✅ Benchmark suite completed successfully" - name: Validate Pro benchmark results - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | set -e echo "🔍 Validating Pro benchmark output files..." @@ -497,7 +500,7 @@ jobs: - name: Upload Pro benchmark results uses: actions/upload-artifact@v4 - if: github.event.inputs.app_version != 'core_only' && always() + if: env.RUN_PRO && always() with: name: benchmark-pro-results-${{ github.run_number }} path: bench_results/ @@ -516,7 +519,8 @@ jobs: echo "Run number: ${{ github.run_number }}" echo "Triggered by: ${{ github.actor }}" echo "Branch: ${{ github.ref_name }}" - echo "App version: ${{ github.event.inputs.app_version || 'both' }}" + echo "Run Core: ${{ env.RUN_CORE }}" + echo "Run Pro: ${{ env.RUN_PRO }}" echo "" if [ "${{ job.status }}" == "success" ]; then echo "✅ All steps completed successfully" From b655ba522c047f1918788b6bfe477f7c70572fef Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 22:11:00 +0000 Subject: [PATCH 31/40] Fix starting/stopping servers --- .github/workflows/benchmark.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index b3e7bb6cb5..a38a0dfd2f 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -311,6 +311,18 @@ jobs: echo "✅ Benchmark suite completed successfully" + - name: Stop Core production server + if: env.RUN_CORE && always() + run: | + echo "🛑 Stopping Core production server..." + # Find and kill the Puma process on port 3001 + if lsof -ti:3001 > /dev/null 2>&1; then + kill $(lsof -ti:3001) || true + echo "✅ Server stopped" + else + echo "ℹ️ No server running on port 3001" + fi + - name: Validate Core benchmark results if: env.RUN_CORE run: | @@ -463,6 +475,18 @@ jobs: echo "✅ Benchmark suite completed successfully" + - name: Stop Pro production server + if: env.RUN_PRO && always() + run: | + echo "🛑 Stopping Pro production server..." + # Find and kill the Puma process on port 3001 + if lsof -ti:3001 > /dev/null 2>&1; then + kill $(lsof -ti:3001) || true + echo "✅ Server stopped" + else + echo "ℹ️ No server running on port 3001" + fi + - name: Validate Pro benchmark results if: env.RUN_PRO run: | From 5618a25ab92fd71186934dfd80cb79f3a221a3ef Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 09:55:31 +0000 Subject: [PATCH 32/40] Simplify validate steps --- .github/workflows/benchmark.yml | 76 +++++++++------------------------ 1 file changed, 20 insertions(+), 56 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index a38a0dfd2f..61300b3552 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -326,41 +326,21 @@ jobs: - name: Validate Core benchmark results if: env.RUN_CORE run: | - set -e # Exit on any error - echo "🔍 Validating benchmark output files..." 
- - RESULTS_DIR="bench_results" - REQUIRED_FILES=("summary.txt") - MISSING_FILES=() + set -e + echo "🔍 Validating benchmark results..." - # Check if results directory exists - if [ ! -d "${RESULTS_DIR}" ]; then - echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + if [ ! -f "bench_results/summary.txt" ]; then + echo "❌ ERROR: benchmark summary file not found" exit 1 fi - - # List all generated files - echo "Generated files:" - ls -lh ${RESULTS_DIR}/ || true - echo "" - # Check for required files - for file in "${REQUIRED_FILES[@]}"; do - if [ ! -f "${RESULTS_DIR}/${file}" ]; then - MISSING_FILES+=("${file}") - fi - done - - # Report validation results - if [ ${#MISSING_FILES[@]} -eq 0 ]; then - echo "✅ All required benchmark output files present" - echo "📊 Summary preview:" - head -20 ${RESULTS_DIR}/summary.txt || true - else - echo "⚠️ WARNING: Some required files are missing:" - printf ' - %s\n' "${MISSING_FILES[@]}" - echo "Continuing with available results..." - fi + echo "✅ Benchmark results found" + echo "" + echo "📊 Summary:" + column -t -s $'\t' bench_results/summary.txt + echo "" + echo "Generated files:" + ls -lh bench_results/ - name: Upload Core benchmark results uses: actions/upload-artifact@v4 @@ -491,36 +471,20 @@ jobs: if: env.RUN_PRO run: | set -e - echo "🔍 Validating Pro benchmark output files..." - - RESULTS_DIR="bench_results" - REQUIRED_FILES=("summary.txt") - MISSING_FILES=() + echo "🔍 Validating benchmark results..." - if [ ! -d "${RESULTS_DIR}" ]; then - echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + if [ ! -f "bench_results/summary.txt" ]; then + echo "❌ ERROR: benchmark summary file not found" exit 1 fi - echo "Generated files:" - ls -lh ${RESULTS_DIR}/ || true + echo "✅ Benchmark results found" echo "" - - for file in "${REQUIRED_FILES[@]}"; do - if [ ! -f "${RESULTS_DIR}/${file}" ]; then - MISSING_FILES+=("${file}") - fi - done - - if [ ${#MISSING_FILES[@]} -eq 0 ]; then - echo "✅ All required benchmark output files present" - echo "📊 Summary preview:" - head -20 ${RESULTS_DIR}/summary.txt || true - else - echo "⚠️ WARNING: Some required files are missing:" - printf ' - %s\n' "${MISSING_FILES[@]}" - echo "Continuing with available results..." 
- fi + echo "📊 Summary:" + column -t -s $'\t' bench_results/summary.txt + echo "" + echo "Generated files:" + ls -lh bench_results/ - name: Upload Pro benchmark results uses: actions/upload-artifact@v4 From 8aee006c199132462c52327f71365c462f1cc5fb Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 10:04:27 +0000 Subject: [PATCH 33/40] Temp config to speed up --- .github/workflows/benchmark.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 61300b3552..d11176c5ae 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -51,8 +51,7 @@ on: app_version: description: 'Which app version to benchmark' required: false - # FIXME: for debugging, restore 'both' before merging - default: 'pro_only' + default: 'both' type: choice options: - 'both' @@ -73,19 +72,20 @@ env: K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" # Determine which apps to run (default is 'pro_only' for all triggers) - RUN_CORE: ${{ (github.event.inputs.app_version || 'pro_only') != 'pro_only' && 'true' || '' }} - RUN_PRO: ${{ (github.event.inputs.app_version || 'pro_only') != 'core_only' && 'true' || '' }} + RUN_CORE: ${{ (github.event.inputs.app_version || 'both') != 'pro_only' && 'true' || '' }} + RUN_PRO: ${{ (github.event.inputs.app_version || 'both') != 'core_only' && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) - ROUTES: ${{ github.event.inputs.routes }} + # FIXME: default ROUTES, TOOLS and DURATION are set to speed up tests, remove before merging + ROUTES: ${{ github.event.inputs.routes || '/' }} RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION: ${{ github.event.inputs.duration }} + DURATION: ${{ github.event.inputs.duration || '5s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} CONNECTIONS: ${{ github.event.inputs.connections }} MAX_CONNECTIONS: ${{ github.event.inputs.connections }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools }} + TOOLS: ${{ github.event.inputs.tools || 'fortio' }} jobs: benchmark: From bcd7029c76e5635dec96908e97b7ac7e801661c4 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 10:10:56 +0000 Subject: [PATCH 34/40] Optimize tools installation --- .github/workflows/benchmark.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index d11176c5ae..1346c56570 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -141,13 +141,14 @@ jobs: - name: Cache Fortio binary id: cache-fortio + if: contains(env.TOOLS, 'fortio') uses: actions/cache@v4 with: path: ~/bin/fortio key: fortio-${{ runner.os }}-${{ runner.arch }}-${{ env.FORTIO_VERSION }} - name: Install Fortio - if: steps.cache-fortio.outputs.cache-hit != 'true' + if: contains(env.TOOLS, 'fortio') && steps.cache-fortio.outputs.cache-hit != 'true' run: | echo "📦 Installing Fortio v${FORTIO_VERSION}" @@ -160,13 +161,14 @@ jobs: - name: Cache Vegeta binary id: cache-vegeta + if: contains(env.TOOLS, 'vegeta') uses: actions/cache@v4 with: path: ~/bin/vegeta key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }} - name: Install Vegeta - if: steps.cache-vegeta.outputs.cache-hit != 'true' + if: 
contains(env.TOOLS, 'vegeta') && steps.cache-vegeta.outputs.cache-hit != 'true' run: | echo "📦 Installing Vegeta v${VEGETA_VERSION}" @@ -178,6 +180,7 @@ jobs: mv vegeta ~/bin/ - name: Setup k6 + if: contains(env.TOOLS, 'k6') uses: grafana/setup-k6-action@v1 with: k6-version: ${{ env.K6_VERSION }} From a0e790ff4cacc355d0168f2daf143dbb78a3d1f9 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 11:48:59 +0000 Subject: [PATCH 35/40] Add logging to server check --- spec/performance/bench.rb | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 81b797f2c3..f13b60710b 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -140,9 +140,9 @@ def get_benchmark_routes(app_dir) # Helper method to check if server is responding def server_responding?(uri) response = Net::HTTP.get_response(uri) - response.is_a?(Net::HTTPSuccess) -rescue StandardError - false + { success: response.is_a?(Net::HTTPSuccess), info: "HTTP #{response.code} #{response.message}" } +rescue StandardError => e + { success: false, info: "#{e.class.name}: #{e.message}" } end # Wait for the server to be ready @@ -150,10 +150,24 @@ def server_responding?(uri) puts "Checking server availability at #{BASE_URL}..." test_uri = URI.parse("http://#{BASE_URL}#{routes.first}") start_time = Time.now +attempt_count = 0 loop do - break if server_responding?(test_uri) + attempt_count += 1 + attempt_start = Time.now + result = server_responding?(test_uri) + attempt_duration = Time.now - attempt_start + elapsed = Time.now - start_time + + # rubocop:disable Layout/LineLength + if result[:success] + puts " ✅ Attempt #{attempt_count} at #{elapsed.round(2)}s: SUCCESS - #{result[:info]} (took #{attempt_duration.round(3)}s)" + break + else + puts " ❌ Attempt #{attempt_count} at #{elapsed.round(2)}s: FAILED - #{result[:info]} (took #{attempt_duration.round(3)}s)" + end + # rubocop:enable Layout/LineLength - raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC + raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if elapsed > TIMEOUT_SEC sleep 1 end From 5eac9895131c4625f1427873d9fa8ecc508569b0 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 12:02:21 +0000 Subject: [PATCH 36/40] Make installs frozen --- .github/workflows/benchmark.yml | 10 +++++----- react_on_rails/spec/dummy/Gemfile.lock | 3 +++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 1346c56570..2fd68646b5 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -240,7 +240,7 @@ jobs: - name: Install Node modules with Yarn for Core dummy app if: env.RUN_CORE - run: cd spec/dummy && yarn install --no-progress --no-emoji + run: cd spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji - name: Save Core dummy app ruby gems to cache if: env.RUN_CORE @@ -253,9 +253,9 @@ jobs: if: env.RUN_CORE run: | cd spec/dummy - bundle lock --add-platform 'x86_64-linux' bundle config set path vendor/bundle - bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 + bundle config set frozen true + bundle _2.5.4_ install --jobs=4 --retry=3 - name: Prepare Core production assets if: env.RUN_CORE @@ -393,9 +393,9 @@ jobs: if: env.RUN_PRO run: | cd react_on_rails_pro/spec/dummy - bundle lock --add-platform 'x86_64-linux' bundle config set path vendor/bundle - 
bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 + bundle config set frozen true + bundle _2.5.4_ install --jobs=4 --retry=3 - name: Generate file-system based entrypoints for Pro if: env.RUN_PRO diff --git a/react_on_rails/spec/dummy/Gemfile.lock b/react_on_rails/spec/dummy/Gemfile.lock index eb919c4013..0f81d8abaf 100644 --- a/react_on_rails/spec/dummy/Gemfile.lock +++ b/react_on_rails/spec/dummy/Gemfile.lock @@ -197,6 +197,8 @@ GEM nokogiri (1.18.10) mini_portile2 (~> 2.8.2) racc (~> 1.4) + nokogiri (1.18.10-x86_64-linux-gnu) + racc (~> 1.4) ostruct (0.6.3) package_json (0.1.0) parallel (1.24.0) @@ -423,6 +425,7 @@ GEM PLATFORMS ruby + x86_64-linux DEPENDENCIES amazing_print From 54241f6a74522647bdd04970d8e4c480fd565336 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 12:05:34 +0000 Subject: [PATCH 37/40] Allow redirects in server_responding --- spec/performance/bench.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index f13b60710b..c7b5b24d4d 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -140,7 +140,11 @@ def get_benchmark_routes(app_dir) # Helper method to check if server is responding def server_responding?(uri) response = Net::HTTP.get_response(uri) - { success: response.is_a?(Net::HTTPSuccess), info: "HTTP #{response.code} #{response.message}" } + # Accept both success (2xx) and redirect (3xx) responses as "server is responding" + success = response.is_a?(Net::HTTPSuccess) || response.is_a?(Net::HTTPRedirection) + info = "HTTP #{response.code} #{response.message}" + info += " -> #{response['location']}" if response.is_a?(Net::HTTPRedirection) && response["location"] + { success: success, info: info } rescue StandardError => e { success: false, info: "#{e.class.name}: #{e.message}" } end From 422fa4382aae745fee4e1b633c7f57147bfbbeea Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 12:24:32 +0000 Subject: [PATCH 38/40] Try full Pro benchmark --- .github/workflows/benchmark.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 2fd68646b5..2e47ee38c1 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -75,17 +75,16 @@ env: RUN_CORE: ${{ (github.event.inputs.app_version || 'both') != 'pro_only' && 'true' || '' }} RUN_PRO: ${{ (github.event.inputs.app_version || 'both') != 'core_only' && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) - # FIXME: default ROUTES, TOOLS and DURATION are set to speed up tests, remove before merging - ROUTES: ${{ github.event.inputs.routes || '/' }} + ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION: ${{ github.event.inputs.duration || '5s' }} + DURATION: ${{ github.event.inputs.duration }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} CONNECTIONS: ${{ github.event.inputs.connections }} MAX_CONNECTIONS: ${{ github.event.inputs.connections }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools || 'fortio' }} + TOOLS: ${{ github.event.inputs.tools }} jobs: benchmark: From db480bf9e9431ff5fe06f9cbe02d34ac95416070 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 1 Dec 2025 12:13:17 
+0000
Subject: [PATCH 39/40] Update Core dummy path

---
 .github/workflows/benchmark.yml | 12 ++++++------
 spec/performance/bench.rb       |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index 2e47ee38c1..6b14ac0fdf 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -235,23 +235,23 @@ jobs:
 
       - name: yalc add react-on-rails
         if: env.RUN_CORE
-        run: cd spec/dummy && yalc add react-on-rails
+        run: cd react_on_rails/spec/dummy && yalc add react-on-rails
 
       - name: Install Node modules with Yarn for Core dummy app
         if: env.RUN_CORE
-        run: cd spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji
+        run: cd react_on_rails/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji
 
       - name: Save Core dummy app ruby gems to cache
         if: env.RUN_CORE
         uses: actions/cache@v4
         with:
-          path: spec/dummy/vendor/bundle
+          path: react_on_rails/spec/dummy/vendor/bundle
           key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }}
 
       - name: Install Ruby Gems for Core dummy app
         if: env.RUN_CORE
         run: |
-          cd spec/dummy
+          cd react_on_rails/spec/dummy
           bundle config set path vendor/bundle
           bundle config set frozen true
           bundle _2.5.4_ install --jobs=4 --retry=3
@@ -261,7 +261,7 @@ jobs:
         run: |
           set -e # Exit on any error
           echo "🔨 Building production assets..."
-          cd spec/dummy
+          cd react_on_rails/spec/dummy
 
           if ! bin/prod-assets; then
             echo "❌ ERROR: Failed to build production assets"
@@ -275,7 +275,7 @@ jobs:
         run: |
           set -e # Exit on any error
           echo "🚀 Starting production server..."
-          cd spec/dummy
+          cd react_on_rails/spec/dummy
 
           # Start server in background
           bin/prod &
diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb
index c7b5b24d4d..7c9cda0726 100755
--- a/spec/performance/bench.rb
+++ b/spec/performance/bench.rb
@@ -16,7 +16,7 @@ def env_or_default(key, default)
 
 # Benchmark parameters
 PRO = ENV.fetch("PRO", "false") == "true"
-APP_DIR = PRO ? "react_on_rails_pro/spec/dummy" : "spec/dummy"
+APP_DIR = PRO ? "react_on_rails_pro/spec/dummy" : "react_on_rails/spec/dummy"
 ROUTES = env_or_default("ROUTES", nil)
 BASE_URL = env_or_default("BASE_URL", "localhost:3001")
 # requests per second; if "max" will get maximum number of queries instead of a fixed rate

From 530d99b6ec20c8e4ff642272e19e15ac707fe652 Mon Sep 17 00:00:00 2001
From: Alexey Romanov
Date: Mon, 1 Dec 2025 20:34:25 +0000
Subject: [PATCH 40/40] Temporary root RuboCop config

---
 .rubocop.yml | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 .rubocop.yml

diff --git a/.rubocop.yml b/.rubocop.yml
new file mode 100644
index 0000000000..218622a8a6
--- /dev/null
+++ b/.rubocop.yml
@@ -0,0 +1,9 @@
+# Root config reuses the one from react_on_rails for now,
+# later it should be the other way around
+inherit_from: react_on_rails/.rubocop.yml
+
+AllCops:
+  Exclude:
+    - 'gen-examples/**/*'
+    - 'react_on_rails/**/*'
+    - 'react_on_rails_pro/**/*'