diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 000000000..41d10f9d6
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,340 @@
+name: Benchmark Workflow
+
+on:
+  # https://github.com/mxschmitt/action-tmate?tab=readme-ov-file#manually-triggered-debug
+  workflow_dispatch:
+    inputs:
+      debug_enabled:
+        description: 'Enable SSH access (⚠️ Security Risk - read workflow comments)'
+        required: false
+        default: false
+        type: boolean
+      rate:
+        description: 'Requests per second (use "max" for maximum throughput)'
+        required: false
+        default: '50'
+        type: string
+      duration_sec:
+        description: 'Duration in seconds'
+        required: false
+        default: 10
+        type: number
+      vus:
+        description: 'Virtual users for k6'
+        required: false
+        default: 100
+        type: number
+      tools:
+        description: 'Comma-separated list of tools to run'
+        required: false
+        default: 'fortio,vegeta,k6'
+        type: string
+  push:
+    branches:
+      - main
+  pull_request:
+
+env:
+  FORTIO_VERSION: "1.72.0"
+  K6_VERSION: "1.3.0"
+  VEGETA_VERSION: "12.12.0"
+  # Benchmark defaults (overridden by workflow_dispatch inputs)
+  RATE: ${{ github.event.inputs.rate || '50' }}
+  DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }}
+  VUS: ${{ github.event.inputs.vus || '100' }}
+  TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }}
+
+jobs:
+  benchmark:
+    runs-on: ubuntu-latest
+
+    steps:
+      # ============================================
+      # STEP 1: CHECKOUT CODE
+      # ============================================
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      # ============================================
+      # STEP 2: OPTIONAL SSH ACCESS
+      # ============================================
+      # NOTE: Interactive confirmation is not possible in GitHub Actions.
+      # As a secure workaround, SSH access is gated by the workflow_dispatch
+      # input variable 'debug_enabled' which defaults to false.
+      # Users must explicitly set this to true to enable SSH.
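+      # A debug run can also be started from the GitHub CLI, e.g.:
+      #   gh workflow run benchmark.yml -f debug_enabled=true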
+
+      - name: SSH Warning
+        if: ${{ github.event.inputs.debug_enabled == 'true' }}
+        run: |
+          echo "⚠️ ⚠️ ⚠️ SSH ACCESS ENABLED ⚠️ ⚠️ ⚠️"
+          echo ""
+          echo "SECURITY NOTICE:"
+          echo " - SSH access exposes your GitHub Actions runner"
+          echo " - Only proceed if you understand and accept the risks"
+          echo " - Do NOT store secrets or sensitive data on the runner"
+          echo " - Access is limited to the workflow initiator only"
+          echo " - The session will remain open until manually terminated"
+          echo ""
+          echo "⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️"
+
+      - name: Setup SSH access (if enabled)
+        if: ${{ github.event.inputs.debug_enabled == 'true' }}
+        uses: mxschmitt/action-tmate@v3
+        with:
+          detached: true
+          limit-access-to-actor: true # Only the workflow initiator can access
+
+      # ============================================
+      # STEP 3: INSTALL BENCHMARKING TOOLS
+      # ============================================
+
+      - name: Add tools directory to PATH
+        run: |
+          mkdir -p ~/bin
+          echo "$HOME/bin" >> $GITHUB_PATH
+
+      - name: Cache Fortio binary
+        id: cache-fortio
+        uses: actions/cache@v4
+        with:
+          path: ~/bin/fortio
+          key: fortio-${{ runner.os }}-${{ runner.arch }}-${{ env.FORTIO_VERSION }}
+
+      - name: Install Fortio
+        if: steps.cache-fortio.outputs.cache-hit != 'true'
+        run: |
+          echo "📦 Installing Fortio v${FORTIO_VERSION}"
+
+          # Download and extract fortio binary
+          wget -q https://github.com/fortio/fortio/releases/download/v${FORTIO_VERSION}/fortio-linux_amd64-${FORTIO_VERSION}.tgz
+          tar -xzf fortio-linux_amd64-${FORTIO_VERSION}.tgz
+
+          # Store in cache directory
+          mv fortio ~/bin/
+
+      - name: Cache Vegeta binary
+        id: cache-vegeta
+        uses: actions/cache@v4
+        with:
+          path: ~/bin/vegeta
+          key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }}
+
+      - name: Install Vegeta
+        if: steps.cache-vegeta.outputs.cache-hit != 'true'
+        run: |
+          echo "📦 Installing Vegeta v${VEGETA_VERSION}"
+
+          # Download and extract vegeta binary
+          wget -q https://github.com/tsenart/vegeta/releases/download/v${VEGETA_VERSION}/vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz
+          tar -xzf vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz
+
+          # Store in cache directory
+          mv vegeta ~/bin/
+
+      - name: Setup k6
+        uses: grafana/setup-k6-action@v1
+        with:
+          k6-version: v${{ env.K6_VERSION }}
+
+      # ============================================
+      # STEP 4: START APPLICATION SERVER
+      # ============================================
+
+      - name: Setup Ruby
+        uses: ruby/setup-ruby@v1
+        with:
+          ruby-version: '3.4'
+          bundler: 2.5.9
+
+      - name: Install libyaml-dev dependency
+        run: sudo apt-get install -y libyaml-dev
+
+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: '22'
+          cache: yarn
+          cache-dependency-path: '**/yarn.lock'
+
+      - name: Print system information
+        run: |
+          echo "Linux release: "; cat /etc/issue
+          echo "Current user: "; whoami
+          echo "Current directory: "; pwd
+          echo "Ruby version: "; ruby -v
+          echo "Node version: "; node -v
+          echo "Yarn version: "; yarn --version
+          echo "Bundler version: "; bundle --version
+
+      - name: Install Node modules with Yarn for renderer package
+        run: |
+          yarn install --no-progress --no-emoji --frozen-lockfile
+
+      - name: yalc publish for react-on-rails
+        run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish
+
+      - name: yalc add react-on-rails
+        run: cd spec/dummy && yalc add react-on-rails
+
+      - name: Install Node modules with Yarn for dummy app
+        run: cd spec/dummy && yarn install --no-progress --no-emoji
+
+      - name: Save dummy app ruby gems to cache
+        uses: actions/cache@v4
+        with:
+          path: spec/dummy/vendor/bundle
+          key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }}
+
+      - name: Install Ruby Gems for dummy app
+        run: |
+          cd spec/dummy
+          bundle lock --add-platform 'x86_64-linux'
+          if ! bundle check --path=vendor/bundle; then
+            bundle _2.5.9_ install --path=vendor/bundle --jobs=4 --retry=3
+          fi
+
+      - name: generate file system-based packs
+        run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs
+
+      - name: Prepare production assets
+        run: |
+          set -e # Exit on any error
+          echo "🔨 Building production assets..."
+          cd spec/dummy
+
+          if ! bin/prod-assets; then
+            echo "❌ ERROR: Failed to build production assets"
+            exit 1
+          fi
+
+          echo "✅ Production assets built successfully"
+
+      - name: Start production server
+        run: |
+          set -e # Exit on any error
+          echo "🚀 Starting production server..."
+          cd spec/dummy
+
+          # Start server in background
+          bin/prod &
+          SERVER_PID=$!
+          echo "Server started with PID: ${SERVER_PID}"
+
+          # Wait for server to be ready (max 30 seconds)
+          echo "⏳ Waiting for server to be ready..."
+          for i in {1..30}; do
+            if curl -sf http://localhost:3001 > /dev/null 2>&1; then
+              echo "✅ Server is ready and responding"
+              exit 0
+            fi
+            echo " Attempt $i/30: Server not ready yet..."
+            sleep 1
+          done
+
+          echo "❌ ERROR: Server failed to start within 30 seconds"
+          exit 1
+
+      # ============================================
+      # STEP 5: RUN BENCHMARKS
+      # ============================================
+
+      - name: Execute benchmark suite
+        run: |
+          set -e # Exit on any error
+          echo "🏃 Running benchmark suite..."
+          echo "Script: spec/performance/bench.sh"
+          echo ""
+          echo "Benchmark parameters:"
+          echo " - RATE: ${RATE}"
+          echo " - DURATION_SEC: ${DURATION_SEC}"
+          echo " - VUS: ${VUS}"
+          echo " - TOOLS: ${TOOLS}"
+          echo ""
+
+          # Make script executable and run
+          chmod +x spec/performance/bench.sh
+
+          if ! spec/performance/bench.sh; then
+            echo "❌ ERROR: Benchmark execution failed"
+            exit 1
+          fi
+
+          echo "✅ Benchmark suite completed successfully"
+
+      - name: Validate benchmark results
+        run: |
+          set -e # Exit on any error
+          echo "🔍 Validating benchmark output files..."
+
+          RESULTS_DIR="bench_results"
+          REQUIRED_FILES=("summary.txt")
+          MISSING_FILES=()
+
+          # Check if results directory exists
+          if [ ! -d "${RESULTS_DIR}" ]; then
+            echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found"
+            exit 1
+          fi
+
+          # List all generated files
+          echo "Generated files:"
+          ls -lh ${RESULTS_DIR}/ || true
+          echo ""
+
+          # Check for required files
+          for file in "${REQUIRED_FILES[@]}"; do
+            if [ ! -f "${RESULTS_DIR}/${file}" ]; then
+              MISSING_FILES+=("${file}")
+            fi
+          done
+
+          # Report validation results
+          if [ ${#MISSING_FILES[@]} -eq 0 ]; then
+            echo "✅ All required benchmark output files present"
+            echo "📊 Summary preview:"
+            head -20 ${RESULTS_DIR}/summary.txt || true
+          else
+            echo "⚠️ WARNING: Some required files are missing:"
+            printf ' - %s\n' "${MISSING_FILES[@]}"
+            echo "Continuing with available results..."
+          fi
+
+      # ============================================
+      # STEP 6: COLLECT BENCHMARK RESULTS
+      # ============================================
+
+      - name: Upload benchmark results
+        uses: actions/upload-artifact@v4
+        if: always() # Upload even if benchmark fails
+        with:
+          name: benchmark-results-${{ github.run_number }}
+          path: bench_results/
+          retention-days: 30
+          if-no-files-found: warn
+
+      - name: Verify artifact upload
+        if: success()
+        run: |
+          echo "✅ Benchmark results uploaded as workflow artifacts"
+          echo "📦 Artifact name: benchmark-results-${{ github.run_number }}"
+          echo "🔗 Access artifacts from the Actions tab in GitHub"
+
+      # ============================================
+      # WORKFLOW COMPLETION
+      # ============================================
+
+      - name: Workflow summary
+        if: always()
+        run: |
+          echo "📋 Benchmark Workflow Summary"
+          echo "=============================="
+          echo "Status: ${{ job.status }}"
+          echo "Run number: ${{ github.run_number }}"
+          echo "Triggered by: ${{ github.actor }}"
+          echo "Branch: ${{ github.ref_name }}"
+          echo ""
+          if [ "${{ job.status }}" == "success" ]; then
+            echo "✅ All steps completed successfully"
+          else
+            echo "❌ Workflow encountered errors - check logs above"
+          fi
diff --git a/.gitignore b/.gitignore
index 9aee9436a..79839d612 100644
--- a/.gitignore
+++ b/.gitignore
@@ -67,6 +67,9 @@ yalc.lock
 /spec/dummy/.bsb.lock
 /spec/dummy/**/*.res.js
 
+# Performance test results
+/bench_results
+
 # Generated by ROR FS-based Registry
 generated
 
diff --git a/spec/dummy/bin/prod b/spec/dummy/bin/prod
new file mode 100755
index 000000000..984ec12cf
--- /dev/null
+++ b/spec/dummy/bin/prod
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+
+# Run only after ./prod-assets
+NODE_ENV=production RAILS_ENV=production bundle exec rails s -p 3001
diff --git a/spec/dummy/bin/prod-assets b/spec/dummy/bin/prod-assets
new file mode 100755
index 000000000..96be6c50e
--- /dev/null
+++ b/spec/dummy/bin/prod-assets
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+export NODE_ENV=production
+export RAILS_ENV=production
+if [ "$CI" = "true" ]; then
+  bundle exec bootsnap precompile --gemfile app/ lib/ config/
+fi
+bundle exec rails assets:precompile
diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh
new file mode 100755
index 000000000..773744a66
--- /dev/null
+++ b/spec/performance/bench.sh
@@ -0,0 +1,162 @@
+#!/usr/bin/env bash
+set -euo pipefail
+#set -x # Uncomment for debugging commands
+
+# Benchmark parameters
+TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}"
+# Requests per second; if "max", run at maximum throughput instead of a fixed rate
+RATE=${RATE:-50}
+# virtual users for k6
+VUS=${VUS:-100}
+DURATION_SEC=${DURATION_SEC:-10}
+DURATION="${DURATION_SEC}s"
+# Tools to run (comma-separated)
+TOOLS=${TOOLS:-fortio,vegeta,k6}
+
+OUTDIR="bench_results"
+
+# Precompute checks for each tool
+RUN_FORTIO=0
+RUN_VEGETA=0
+RUN_K6=0
+[[ ",$TOOLS," == *",fortio,"* ]] && RUN_FORTIO=1
+[[ ",$TOOLS," == *",vegeta,"* ]] && RUN_VEGETA=1
+[[ ",$TOOLS," == *",k6,"* ]] && RUN_K6=1
+
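+# Fail fast if any selected benchmark tool or required helper (jq, column, awk, tee) is missing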
+for cmd in ${TOOLS//,/ } jq column awk tee; do
+  if ! command -v "$cmd" >/dev/null 2>&1; then
+    echo "Error: required tool '$cmd' is not installed" >&2
+    exit 1
+  fi
+done
+
+TIMEOUT_SEC=60
+START=$(date +%s)
+until curl -fsS "$TARGET" >/dev/null; do
+  if (( $(date +%s) - START > TIMEOUT_SEC )); then
+    echo "Error: Target $TARGET not responding within ${TIMEOUT_SEC}s" >&2
+    exit 1
+  fi
+  sleep 1
+done
+
+mkdir -p "$OUTDIR"
+
+if [ "$RATE" = "max" ]; then
+  FORTIO_ARGS=(-qps 0)
+  VEGETA_ARGS=()
+  K6_SCENARIOS="{
+    max_rate: {
+      executor: 'shared-iterations',
+      vus: $VUS,
+      iterations: $((VUS * DURATION_SEC * 10)),
+      maxDuration: '$DURATION'
+    }
+  }"
+else
+  FORTIO_ARGS=(-qps "$RATE" -uniform)
+  VEGETA_ARGS=(-rate="$RATE")
+  K6_SCENARIOS="{
+    constant_rate: {
+      executor: 'constant-arrival-rate',
+      rate: $RATE,
+      timeUnit: '1s',
+      duration: '$DURATION',
+      preAllocatedVUs: $VUS,
+      maxVUs: $((VUS * 10))
+    }
+  }"
+fi
+
+if (( RUN_FORTIO )); then
+  echo "===> Fortio"
+  # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass
+  fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout 30s -json "$OUTDIR/fortio.json" "$TARGET" \
+    | tee "$OUTDIR/fortio.txt"
+fi
+
+if (( RUN_VEGETA )); then
+  echo
+  echo "===> Vegeta"
+  echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" \
+    | tee "$OUTDIR/vegeta.bin" \
+    | vegeta report | tee "$OUTDIR/vegeta.txt"
+  vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json"
+fi
+
+if (( RUN_K6 )); then
+  echo
+  echo "===> k6"
+  cat <<EOF > "$OUTDIR/k6_test.js"
+import http from 'k6/http';
+import { check } from 'k6';
+
+export const options = {
+  scenarios: $K6_SCENARIOS,
+};
+
+export default function () {
+  check(http.get('$TARGET'), {
+    'status=200': r => r.status === 200,
+    // you can add more if needed:
+    // 'status=500': r => r.status === 500,
+  });
+}
+EOF
+
+  k6 run --summary-export="$OUTDIR/k6_summary.json" --summary-trend-stats "min,avg,med,max,p(90),p(99)" "$OUTDIR/k6_test.js" | tee "$OUTDIR/k6.txt"
+fi
+
+echo
+echo "===> Parsing results and generating summary"
+
+echo -e "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus" > "$OUTDIR/summary.txt"
+
+if (( RUN_FORTIO )); then
+  FORTIO_RPS=$(jq '.ActualQPS' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}')
+  FORTIO_P50=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==50) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}')
+  FORTIO_P90=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==90) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}')
+  FORTIO_P99=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==99) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}')
+  FORTIO_STATUS=$(jq -r '.RetCodes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/fortio.json")
+  echo -e "Fortio\t$FORTIO_RPS\t$FORTIO_P50\t$FORTIO_P90\t$FORTIO_P99\t$FORTIO_STATUS" >> "$OUTDIR/summary.txt"
+fi
+
+if (( RUN_VEGETA )); then
+  # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period
+  VEGETA_RPS=$(jq '.throughput' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}')
+  VEGETA_P50=$(jq '.latencies["50th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}')
+  VEGETA_P90=$(jq '.latencies["90th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}')
+  VEGETA_P99=$(jq '.latencies["99th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}')
+  VEGETA_STATUS=$(jq -r '.status_codes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/vegeta.json")
+  echo -e "Vegeta\t$VEGETA_RPS\t$VEGETA_P50\t$VEGETA_P90\t$VEGETA_P99\t$VEGETA_STATUS" >> "$OUTDIR/summary.txt"
+fi
+
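+# k6: latencies come from http_req_duration (ms); per-status counts are rebuilt from the
+# "status=..." checks in k6_test.js (.key[7:] strips the "status=" prefix), and any
+# requests matching no check are reported as "other".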
"Vegeta\t$VEGETA_RPS\t$VEGETA_P50\t$VEGETA_P90\t$VEGETA_P99\t$VEGETA_STATUS" >> "$OUTDIR/summary.txt" +fi + +if (( RUN_K6 )); then + K6_RPS=$(jq '.metrics.iterations.rate' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P50=$(jq '.metrics.http_req_duration.med' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P90=$(jq '.metrics.http_req_duration["p(90)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P99=$(jq '.metrics.http_req_duration["p(99)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + # Status: compute successful vs failed requests + K6_REQS_TOTAL=$(jq '.metrics.http_reqs.count' "$OUTDIR/k6_summary.json") + K6_STATUS=$(jq -r ' + .root_group.checks + | to_entries + | map(.key[7:] + "=" + (.value.passes|tostring)) + | join(",") + ' "$OUTDIR/k6_summary.json") + K6_REQS_KNOWN_STATUS=$(jq -r ' + .root_group.checks + | to_entries + | map(.value.passes) + | add + ' "$OUTDIR/k6_summary.json") + K6_REQS_OTHER=$(( K6_REQS_TOTAL - K6_REQS_KNOWN_STATUS )) + if [ "$K6_REQS_OTHER" -gt 0 ]; then + K6_STATUS="$K6_STATUS,other=$K6_REQS_OTHER" + fi + echo -e "k6\t$K6_RPS\t$K6_P50\t$K6_P90\t$K6_P99\t$K6_STATUS" >> "$OUTDIR/summary.txt" +fi + +echo +echo "Summary saved to $OUTDIR/summary.txt" +column -t -s $'\t' "$OUTDIR/summary.txt"