benchmarks hardening! #1078

.github/scripts/monitor_slurm_job.sh
@@ -0,0 +1,157 @@
#!/bin/bash
# Monitor a SLURM job and stream its output in real-time
# Usage: monitor_slurm_job.sh <job_id> <output_file>

set -euo pipefail

# Cleanup handler to prevent orphaned tail processes
cleanup() {
    if [ -n "${tail_pid:-}" ]; then
        kill "${tail_pid}" 2>/dev/null || true
    fi
}
trap cleanup EXIT

if [ $# -ne 2 ]; then
    echo "Usage: $0 <job_id> <output_file>"
    exit 1
fi

job_id="$1"
output_file="$2"

echo "Submitted batch job $job_id"
echo "Monitoring output file: $output_file"

# Wait for the file to appear, with retry logic for transient squeue failures
echo "Waiting for job to start..."
squeue_retries=0
max_squeue_retries=5
while [ ! -f "$output_file" ]; do
    # Check if the job is still queued/running
    if squeue -j "$job_id" &>/dev/null; then
        squeue_retries=0  # Reset on success
        sleep 5
    else
        squeue_retries=$((squeue_retries + 1))
        if [ $squeue_retries -ge $max_squeue_retries ]; then
            # Re-check: the file may have appeared while we were backing off
            if [ ! -f "$output_file" ]; then
                echo "ERROR: Job $job_id not in queue and output file not created"
                exit 1
            fi
            break
        fi
        # Exponential backoff (2s, 4s, 8s, 16s)
        sleep_time=$((2 ** squeue_retries))
        echo "Warning: squeue check failed, retrying in ${sleep_time}s..."
        sleep $sleep_time
    fi
done

echo "=== Streaming output for job $job_id ==="
# Stream output while the job runs (explicitly redirect to ensure output visibility)
tail -f "$output_file" 2>&1 &
tail_pid=$!

# Give tail a moment to start and show initial output
sleep 2

# Wait for the job to complete, with retry logic for transient squeue failures
squeue_failures=0
heartbeat_counter=0
while true; do
    if squeue -j "$job_id" &>/dev/null; then
        squeue_failures=0
        # Print heartbeat every 60 seconds (12 iterations * 5 sec)
        heartbeat_counter=$((heartbeat_counter + 1))
        if [ $((heartbeat_counter % 12)) -eq 0 ]; then
            echo "[$(date +%H:%M:%S)] Job $job_id still running..."
        fi
    else
        squeue_failures=$((squeue_failures + 1))
        # Check whether the job actually completed using sacct (if available)
        if [ $squeue_failures -ge 3 ]; then
            if command -v sacct >/dev/null 2>&1; then
                state=$(sacct -j "$job_id" --format=State --noheader 2>/dev/null | head -n1 | awk '{print $1}')
                # Consider the job done only if it reached a terminal state
                case "$state" in
                    COMPLETED|FAILED|CANCELLED|TIMEOUT|OUT_OF_MEMORY)
                        echo "[$(date +%H:%M:%S)] Job $job_id reached terminal state: $state"
                        break
                        ;;
                    *)
                        # Treat as a transient failure; reset and continue polling
                        squeue_failures=0
                        ;;
                esac
            else
                # No sacct: avoid a false positive by doing an extra check cycle
                squeue_failures=2
            fi
Review comment (inline): Bug — infinite loop when the sacct command is unavailable in the monitoring script. When squeue stops reporting the job and sacct is not installed, this branch resets squeue_failures to 2; the next failed squeue check raises it back to 3, the sacct check fails again, and the cycle repeats, so the loop never terminates.
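A possible fix, sketched below as a hypothetical suggestion (the no_sacct_cycles counter, its limit of 5, and its initialization to 0 before the loop are assumptions, not part of this diff): bound how many times the no-sacct branch may fire before concluding the job is gone.

 else
-    # No sacct: avoid a false positive by doing an extra check cycle
-    squeue_failures=2
+    # No sacct: allow a few extra check cycles, then assume the job is gone
+    no_sacct_cycles=$((no_sacct_cycles + 1))
+    if [ "$no_sacct_cycles" -ge 5 ]; then
+        echo "Warning: sacct unavailable; assuming job $job_id has finished"
+        break
+    fi
+    squeue_failures=2
 fi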
        fi
    fi
    sleep 5
done

# Wait for the output file to stop growing (stabilize) before stopping tail
if [ -f "$output_file" ]; then
    last_size=-1
    same_count=0
    while true; do
        size=$(stat -c%s "$output_file" 2>/dev/null || echo -1)
        if [ "$size" -eq "$last_size" ] && [ "$size" -ge 0 ]; then
            same_count=$((same_count + 1))
        else
            same_count=0
            last_size=$size
        fi
        # Two consecutive stable checks (~10s) imply the file has likely been flushed
        if [ $same_count -ge 2 ]; then
            break
        fi
        sleep 5
    done
fi

# Stop tailing (the trap will also handle this on exit)
kill "${tail_pid}" 2>/dev/null || true

echo ""
echo "=== Final output ==="
cat "$output_file"

# Check exit status, with sacct as a fallback
exit_code=""

# Try scontrol first (works for recently finished jobs)
scontrol_output=$(scontrol show job "$job_id" 2>/dev/null || echo "")
if [ -n "$scontrol_output" ]; then
    exit_code=$(echo "$scontrol_output" | grep -oE 'ExitCode=[0-9]+:[0-9]+' | cut -d= -f2 || echo "")
fi

# If scontrol failed or returned an invalid job, try sacct (for completed/aged-out jobs)
if [ -z "$exit_code" ]; then
    echo "Warning: scontrol failed to get exit code, trying sacct..."
    sacct_output=$(sacct -j "$job_id" --format=ExitCode --noheader --parsable2 2>/dev/null | head -n1 || echo "")
    if [ -n "$sacct_output" ]; then
        exit_code="$sacct_output"
    fi
fi

# If we still can't determine the exit code, fail explicitly
if [ -z "$exit_code" ]; then
    echo "ERROR: Unable to determine exit status for job $job_id"
    echo "Both scontrol and sacct failed to return a valid exit code"
    exit 1
fi

# Check if the job succeeded. SLURM reports ExitCode as "rc:signal",
# so "0:0" means a zero return code and no terminating signal.
if [ "$exit_code" != "0:0" ]; then
    echo "ERROR: Job $job_id failed with exit code $exit_code"
    exit 1
fi

echo "Job $job_id completed successfully"
exit 0

.github/scripts/run_parallel_benchmarks.sh
@@ -0,0 +1,78 @@
#!/bin/bash
# Run PR and master benchmarks in parallel and verify outputs
# Usage: run_parallel_benchmarks.sh <device> <interface> <cluster>

set -euo pipefail

if [ $# -ne 3 ]; then
    echo "Usage: $0 <device> <interface> <cluster>"
    exit 1
fi

device="$1"
interface="$2"
cluster="$3"

echo "=========================================="
echo "Starting parallel benchmark jobs..."
echo "=========================================="

# Run both jobs with monitoring, using the dedicated script
(bash .github/scripts/submit_and_monitor_bench.sh pr "$device" "$interface" "$cluster") &
pr_pid=$!
echo "PR job started in background (PID: $pr_pid)"

(bash .github/scripts/submit_and_monitor_bench.sh master "$device" "$interface" "$cluster") &
master_pid=$!
echo "Master job started in background (PID: $master_pid)"

echo "Waiting for both jobs to complete..."

# Wait and capture exit codes reliably
pr_exit=0
master_exit=0
# Read the exit code in the else branch of a plain `if wait`: after `! wait`,
# $? would hold the negated status (always 0 on failure).
if wait "$pr_pid"; then
    echo "PR job completed successfully"
else
    pr_exit=$?
    echo "PR job exited with code: $pr_exit"
fi

if wait "$master_pid"; then
    echo "Master job completed successfully"
else
    master_exit=$?
    echo "Master job exited with code: $master_exit"
fi
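The negation pitfall noted in the comment above is easy to reproduce; a minimal standalone demonstration (a hypothetical snippet, not part of this diff):

#!/bin/bash
# After a `!`-negated pipeline, $? holds the negated status:
if ! false; then
    echo "status: $?"   # prints "status: 0", even though false returned 1
fi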

# Check if either job failed
if [ "${pr_exit}" -ne 0 ] || [ "${master_exit}" -ne 0 ]; then
    echo "ERROR: One or both benchmark jobs failed: pr_exit=${pr_exit}, master_exit=${master_exit}"
    exit 1
fi

echo "=========================================="
echo "Both benchmark jobs completed successfully!"
echo "=========================================="

# Final verification that output files exist before proceeding
pr_yaml="pr/bench-${device}-${interface}.yaml"
master_yaml="master/bench-${device}-${interface}.yaml"

if [ ! -f "$pr_yaml" ]; then
    echo "ERROR: PR benchmark output not found: $pr_yaml"
    ls -la pr/ || true
    exit 1
fi

if [ ! -f "$master_yaml" ]; then
    echo "ERROR: Master benchmark output not found: $master_yaml"
    ls -la master/ || true
    exit 1
fi

echo "Verified both YAML files exist:"
echo "  - $pr_yaml"
echo "  - $master_yaml"
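For reference, a hypothetical invocation of the script above; the gpu/mpi/phoenix argument values are illustrative only and not taken from this diff:

bash .github/scripts/run_parallel_benchmarks.sh gpu mpi phoenix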

.github/scripts/submit_and_monitor_bench.sh
@@ -0,0 +1,51 @@
#!/bin/bash
# Submit and monitor a benchmark job on a SLURM cluster
# Usage: submit_and_monitor_bench.sh <dir> <device> <interface> <cluster>

set -euo pipefail

if [ $# -ne 4 ]; then
    echo "Usage: $0 <dir> <device> <interface> <cluster>"
    exit 1
fi

dir="$1"
device="$2"
interface="$3"
cluster="$4"

echo "[$dir] Submitting benchmark for $device-$interface on $cluster..."
cd "$dir"

# Submit the job
submit_output=$(bash ".github/workflows/$cluster/submit-bench.sh" \
    ".github/workflows/$cluster/bench.sh" "$device" "$interface" 2>&1)

job_id=$(echo "$submit_output" | sed -n 's/.*Submitted batch job \([0-9][0-9]*\).*/\1/p')
job_slug="bench-$device-$interface"
output_file="${job_slug}.out"

if [ -z "$job_id" ]; then
    echo "[$dir] ERROR: Failed to submit job"
    echo "$submit_output"
    exit 1
fi

echo "[$dir] Job ID: $job_id, monitoring output file: $output_file"

# Use the monitoring script
bash .github/scripts/monitor_slurm_job.sh "$job_id" "$output_file"

echo "[$dir] Monitoring complete for job $job_id"

# Verify the YAML output file was created
yaml_file="${job_slug}.yaml"
if [ ! -f "$yaml_file" ]; then
    echo "[$dir] ERROR: Expected output file not found: $yaml_file"
    echo "[$dir] Directory contents:"
    ls -la *.yaml 2>/dev/null || echo "  No YAML files found"
    exit 1
fi

# stat -f%z is the BSD form, stat -c%s the GNU form; try both for portability
echo "[$dir] Verified output file exists: $yaml_file ($(stat -f%z "$yaml_file" 2>/dev/null || stat -c%s "$yaml_file" 2>/dev/null) bytes)"
Review comment on lines 44 to 58 (Contributor): YAML file existence check is incomplete — a gap versus the PR objectives. The script verifies that the YAML output file exists (lines 41–48) but does not validate its contents. According to the PR objectives, the hardening should include "Check summary file existence and required target-data fields (execution and simulation metrics such as grind)". The current validation only checks for file presence, not schema or required fields. Consider parsing the YAML and checking for required fields (e.g., execution time, simulation metrics) to align with the stated objectives. Would you like me to generate a helper function to validate the YAML structure and required fields? This could be added to the script or extracted as a reusable utility.
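One shape such a helper could take — a minimal sketch assuming the summary is flat YAML whose required keys appear as top-level field names; the key names ("exec" and "grind" here are placeholders) and the grep-based check are assumptions, not taken from this diff:

# Hypothetical helper: check that required fields appear in a YAML summary.
validate_bench_yaml() {
    local yaml_file="$1"
    shift
    local field missing=0
    for field in "$@"; do
        if ! grep -qE "^[[:space:]]*${field}:" "$yaml_file"; then
            echo "ERROR: required field '${field}' missing from $yaml_file"
            missing=1
        fi
    done
    return "$missing"
}

# Example: validate_bench_yaml "$yaml_file" exec grind || exit 1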