Merged · Changes from 11 commits
157 changes: 157 additions & 0 deletions .github/scripts/monitor_slurm_job.sh
@@ -0,0 +1,157 @@
#!/bin/bash
# Monitor a SLURM job and stream its output in real-time
# Usage: monitor_slurm_job.sh <job_id> <output_file>

set -euo pipefail

# Cleanup handler to prevent orphaned tail processes
cleanup() {
    if [ -n "${tail_pid:-}" ]; then
        kill "${tail_pid}" 2>/dev/null || true
    fi
}
trap cleanup EXIT

if [ $# -ne 2 ]; then
    echo "Usage: $0 <job_id> <output_file>"
    exit 1
fi

job_id="$1"
output_file="$2"

echo "Submitted batch job $job_id"
echo "Monitoring output file: $output_file"

# Wait for file to appear with retry logic for transient squeue failures
echo "Waiting for job to start..."
squeue_retries=0
max_squeue_retries=5
while [ ! -f "$output_file" ]; do
    # Check if job is still queued/running
    if squeue -j "$job_id" &>/dev/null; then
        squeue_retries=0 # Reset on success
        sleep 5
    else
        squeue_retries=$((squeue_retries + 1))
        if [ $squeue_retries -ge $max_squeue_retries ]; then
            # Job not in queue and output file doesn't exist
            if [ ! -f "$output_file" ]; then
                echo "ERROR: Job $job_id not in queue and output file not created"
                exit 1
            fi
            break
        fi
        # Exponential backoff
        sleep_time=$((2 ** squeue_retries))
        echo "Warning: squeue check failed, retrying in ${sleep_time}s..."
        sleep $sleep_time
    fi
done

echo "=== Streaming output for job $job_id ==="
# Stream output while job runs (explicitly redirect to ensure output visibility)
tail -f "$output_file" 2>&1 &
tail_pid=$!

# Give tail a moment to start and show initial output
sleep 2

# Wait for job to complete with retry logic for transient squeue failures
squeue_failures=0
heartbeat_counter=0
while true; do
    if squeue -j "$job_id" &>/dev/null; then
        squeue_failures=0
        # Print heartbeat every 60 seconds (12 iterations * 5 sec)
        heartbeat_counter=$((heartbeat_counter + 1))
        if [ $((heartbeat_counter % 12)) -eq 0 ]; then
            echo "[$(date +%H:%M:%S)] Job $job_id still running..."
        fi
    else
        squeue_failures=$((squeue_failures + 1))
        # Check if job actually completed using sacct (if available)
        if [ $squeue_failures -ge 3 ]; then
            if command -v sacct >/dev/null 2>&1; then
                state=$(sacct -j "$job_id" --format=State --noheader 2>/dev/null | head -n1 | awk '{print $1}')
                # Consider job done only if it reached a terminal state
                case "$state" in
                    COMPLETED|FAILED|CANCELLED|TIMEOUT|OUT_OF_MEMORY)
                        echo "[$(date +%H:%M:%S)] Job $job_id reached terminal state: $state"
                        break
                        ;;
                    *)
                        # Treat as transient failure, reset failures and continue polling
                        squeue_failures=0
                        ;;
                esac
            else
                # No sacct: avoid false positive by doing an extra check cycle
                squeue_failures=2
            fi
Bug: Infinite loop when sacct command unavailable in monitoring script

When sacct is not available on the system and squeue fails to find a completed job, the script enters an infinite loop. After 3 consecutive squeue failures, the code checks for sacct. If unavailable, it sets squeue_failures=2, which means the next iteration increments to 3, triggers the same check, and resets to 2 again—forever. This causes the benchmark workflow to hang indefinitely on systems without sacct.

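A minimal sketch of one way out of that loop, assuming three consecutive squeue misses are sufficient evidence when sacct is absent (a hypothetical fix, not the change merged here):

# Hypothetical fix sketch: bound the no-sacct fallback instead of resetting
# squeue_failures, so the loop cannot oscillate between 2 and 3 forever.
# Assumes job_id is already set by the surrounding script.
squeue_failures=0
while true; do
    if squeue -j "$job_id" &>/dev/null; then
        squeue_failures=0
    else
        squeue_failures=$((squeue_failures + 1))
        if [ "$squeue_failures" -ge 3 ]; then
            if command -v sacct >/dev/null 2>&1; then
                # (the real script would consult sacct for a terminal state here)
                break
            else
                # No sacct: after three consecutive squeue misses, assume the
                # job is gone. Crucially, do NOT reset the counter here.
                echo "sacct unavailable; assuming job $job_id has finished"
                break
            fi
        fi
    fi
    sleep 5
done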

        fi
    fi
    sleep 5
done

# Wait for output file to finish growing (stabilize) before stopping tail
if [ -f "$output_file" ]; then
    last_size=-1
    same_count=0
    while true; do
        size=$(stat -c%s "$output_file" 2>/dev/null || echo -1)
        if [ "$size" -eq "$last_size" ] && [ "$size" -ge 0 ]; then
            same_count=$((same_count + 1))
        else
            same_count=0
            last_size=$size
        fi
        # Two consecutive stable checks (~10s) implies the file was likely flushed
        if [ $same_count -ge 2 ]; then
            break
        fi
        sleep 5
    done
fi

# Stop tailing (trap will also handle this on exit)
kill "${tail_pid}" 2>/dev/null || true

echo ""
echo "=== Final output ==="
cat "$output_file"

# Check exit status with sacct fallback
exit_code=""

# Try scontrol first (works for recent jobs)
scontrol_output=$(scontrol show job "$job_id" 2>/dev/null || echo "")
if [ -n "$scontrol_output" ]; then
    exit_code=$(echo "$scontrol_output" | grep -oE 'ExitCode=[0-9]+:[0-9]+' | cut -d= -f2 || echo "")
fi

# If scontrol failed or returned invalid job, try sacct (for completed/aged-out jobs)
if [ -z "$exit_code" ]; then
    echo "Warning: scontrol failed to get exit code, trying sacct..."
    sacct_output=$(sacct -j "$job_id" --format=ExitCode --noheader --parsable2 2>/dev/null | head -n1 || echo "")
    if [ -n "$sacct_output" ]; then
        exit_code="$sacct_output"
    fi
fi

# If we still can't determine exit code, fail explicitly
if [ -z "$exit_code" ]; then
    echo "ERROR: Unable to determine exit status for job $job_id"
    echo "Both scontrol and sacct failed to return a valid exit code"
    exit 1
fi

# Check if job succeeded
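# SLURM reports ExitCode as "exit_status:signal_number"; "0:0" is a clean exit.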
if [ "$exit_code" != "0:0" ]; then
echo "ERROR: Job $job_id failed with exit code $exit_code"
exit 1
fi

echo "Job $job_id completed successfully"
exit 0

78 changes: 78 additions & 0 deletions .github/scripts/run_parallel_benchmarks.sh
@@ -0,0 +1,78 @@
#!/bin/bash
# Run PR and master benchmarks in parallel and verify outputs
# Usage: run_parallel_benchmarks.sh <device> <interface> <cluster>

set -euo pipefail

if [ $# -ne 3 ]; then
    echo "Usage: $0 <device> <interface> <cluster>"
    exit 1
fi

device="$1"
interface="$2"
cluster="$3"

echo "=========================================="
echo "Starting parallel benchmark jobs..."
echo "=========================================="

# Run both jobs with monitoring using dedicated script
(bash .github/scripts/submit_and_monitor_bench.sh pr "$device" "$interface" "$cluster") &
pr_pid=$!
echo "PR job started in background (PID: $pr_pid)"

(bash .github/scripts/submit_and_monitor_bench.sh master "$device" "$interface" "$cluster") &
master_pid=$!
echo "Master job started in background (PID: $master_pid)"

echo "Waiting for both jobs to complete..."

# Wait and capture exit codes reliably. Note: `if ! wait ...` would negate the
# pipeline status, so `$?` inside the branch would read as 0; test the plain
# command and capture `$?` in the else-branch instead.
pr_exit=0
master_exit=0

if wait "$pr_pid"; then
    echo "PR job completed successfully"
else
    pr_exit=$?
    echo "PR job exited with code: $pr_exit"
fi

if wait "$master_pid"; then
    echo "Master job completed successfully"
else
    master_exit=$?
    echo "Master job exited with code: $master_exit"
fi

# Check if either job failed
if [ "${pr_exit}" -ne 0 ] || [ "${master_exit}" -ne 0 ]; then
echo "ERROR: One or both benchmark jobs failed: pr_exit=${pr_exit}, master_exit=${master_exit}"
exit 1
fi

echo "=========================================="
echo "Both benchmark jobs completed successfully!"
echo "=========================================="

# Final verification that output files exist before proceeding
pr_yaml="pr/bench-${device}-${interface}.yaml"
master_yaml="master/bench-${device}-${interface}.yaml"

if [ ! -f "$pr_yaml" ]; then
    echo "ERROR: PR benchmark output not found: $pr_yaml"
    ls -la pr/ || true
    exit 1
fi

if [ ! -f "$master_yaml" ]; then
    echo "ERROR: Master benchmark output not found: $master_yaml"
    ls -la master/ || true
    exit 1
fi

echo "Verified both YAML files exist:"
echo " - $pr_yaml"
echo " - $master_yaml"

51 changes: 51 additions & 0 deletions .github/scripts/submit_and_monitor_bench.sh
@@ -0,0 +1,51 @@
#!/bin/bash
# Submit and monitor a benchmark job on a SLURM cluster
# Usage: submit_and_monitor_bench.sh <dir> <device> <interface> <cluster>

set -euo pipefail

if [ $# -ne 4 ]; then
    echo "Usage: $0 <dir> <device> <interface> <cluster>"
    exit 1
fi

dir="$1"
device="$2"
interface="$3"
cluster="$4"

echo "[$dir] Submitting benchmark for $device-$interface on $cluster..."
cd "$dir"

# Submit job
submit_output=$(bash ".github/workflows/${cluster}/submit-bench.sh" \
    ".github/workflows/${cluster}/bench.sh" "$device" "$interface" 2>&1)

job_id=$(echo "$submit_output" | sed -n 's/.*Submitted batch job \([0-9][0-9]*\).*/\1/p')
job_slug="bench-$device-$interface"
output_file="${job_slug}.out"

if [ -z "$job_id" ]; then
    echo "[$dir] ERROR: Failed to submit job"
    echo "$submit_output"
    exit 1
fi

echo "[$dir] Job ID: $job_id, monitoring output file: $output_file"

# Use the monitoring script
bash .github/scripts/monitor_slurm_job.sh "$job_id" "$output_file"

echo "[$dir] Monitoring complete for job $job_id"

# Verify the YAML output file was created
yaml_file="${job_slug}.yaml"
if [ ! -f "$yaml_file" ]; then
    echo "[$dir] ERROR: Expected output file not found: $yaml_file"
    echo "[$dir] Directory contents:"
    ls -la *.yaml 2>/dev/null || echo "  No YAML files found"
    exit 1
fi

echo "[$dir] Verified output file exists: $yaml_file ($(stat -f%z "$yaml_file" 2>/dev/null || stat -c%s "$yaml_file" 2>/dev/null) bytes)"
Comment on lines 44 to 58 (Contributor):
⚠️ Potential issue | 🟠 Major

YAML file existence check is incomplete — gap vs. PR objectives.

The script verifies the YAML output file exists (lines 41–48) but does not validate its contents. According to the PR objectives, the hardening should include "Check summary file existence and required target-data fields (execution and simulation metrics such as grind)". The current validation only checks for file presence, not schema or required fields.

Consider parsing the YAML and checking for required fields (e.g., execution time, simulation metrics) to align with the stated objectives.

Would you like me to generate a helper function to validate the YAML structure and required fields? This could be added to the script or extracted as a reusable utility.
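As a rough sketch of such a helper, assuming python3 with PyYAML is available on the runner (the required field names below are illustrative placeholders, not the actual MFC schema):

# Hypothetical helper (not part of this PR): fail if the benchmark YAML is
# missing expected top-level fields.
validate_bench_yaml() {
    local yaml_file="$1"
    python3 - "$yaml_file" <<'PYEOF'
import sys
import yaml  # assumes PyYAML is installed on the runner

path = sys.argv[1]
with open(path) as f:
    data = yaml.safe_load(f)

required = ["cases"]  # illustrative field name; adjust to the real schema
missing = [k for k in required if not isinstance(data, dict) or k not in data]
if missing:
    sys.exit(f"ERROR: {path} missing required fields: {missing}")
print(f"{path}: required fields present")
PYEOF
}

# Example usage: validate_bench_yaml "bench-gpu-acc.yaml"

Such a check could run immediately after the existence test in submit_and_monitor_bench.sh.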


9 changes: 3 additions & 6 deletions .github/workflows/bench.yml
@@ -23,7 +23,7 @@ jobs:
           filters: ".github/file-filter.yml"

   self:
-    name: "${{ matrix.name }} (${{ matrix.device }})"
+    name: "${{ matrix.name }} (${{ matrix.device }}${{ matrix.interface != 'none' && format('-{0}', matrix.interface) || '' }})"
     if: ${{ github.repository=='MFlowCode/MFC' && needs.file-changes.outputs.checkall=='true' && ((github.event_name=='pull_request_review' && github.event.review.state=='approved') || (github.event_name=='pull_request' && (github.event.pull_request.user.login=='sbryngelson' || github.event.pull_request.user.login=='wilfonba'))) }}
     needs: file-changes
     strategy:
@@ -73,7 +73,7 @@ jobs:
     runs-on:
       group: ${{ matrix.group }}
       labels: ${{ matrix.labels }}
-    timeout-minutes: 1400
+    timeout-minutes: 480
     env:
       ACTIONS_RUNNER_FORCE_ACTIONS_NODE_VERSION: node16
       ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true
@@ -98,10 +98,7 @@ jobs:
           wait %1 && wait %2

       - name: Bench (Master v. PR)
-        run: |
-          (cd pr && bash .github/workflows/${{ matrix.cluster }}/submit-bench.sh .github/workflows/${{ matrix.cluster }}/bench.sh ${{ matrix.device }} ${{ matrix.interface }}) &
-          (cd master && bash .github/workflows/${{ matrix.cluster }}/submit-bench.sh .github/workflows/${{ matrix.cluster }}/bench.sh ${{ matrix.device }} ${{ matrix.interface }}) &
-          wait %1 && wait %2
+        run: bash .github/scripts/run_parallel_benchmarks.sh ${{ matrix.device }} ${{ matrix.interface }} ${{ matrix.cluster }}

       - name: Generate & Post Comment
         run: |
35 changes: 26 additions & 9 deletions .github/workflows/test.yml
@@ -93,23 +93,40 @@ jobs:
       OPT2: ${{ matrix.debug == 'debug' && '-% 20' || '' }}

   self:
-    name: Self Hosted
+    name: "${{ matrix.cluster_name }} (${{ matrix.device }}${{ matrix.interface != 'none' && format('-{0}', matrix.interface) || '' }})"
     if: github.repository == 'MFlowCode/MFC' && needs.file-changes.outputs.checkall == 'true'
     needs: file-changes
     continue-on-error: false
-    timeout-minutes: 1400
+    timeout-minutes: 480
     strategy:
       matrix:
-        device: ['gpu']
-        interface: ['acc', 'omp']
-        lbl: ['gt', 'frontier']
         include:
-          - device: 'cpu'
+          # Phoenix (GT)
+          - lbl: 'gt'
+            cluster_name: 'Georgia Tech | Phoenix'
+            device: 'gpu'
+            interface: 'acc'
+          - lbl: 'gt'
+            cluster_name: 'Georgia Tech | Phoenix'
+            device: 'gpu'
+            interface: 'omp'
+          - lbl: 'gt'
+            cluster_name: 'Georgia Tech | Phoenix'
+            device: 'cpu'
             interface: 'none'
-            lbl: 'gt'
-          - device: 'cpu'
+          # Frontier (ORNL)
+          - lbl: 'frontier'
+            cluster_name: 'Oak Ridge | Frontier'
+            device: 'gpu'
+            interface: 'acc'
+          - lbl: 'frontier'
+            cluster_name: 'Oak Ridge | Frontier'
+            device: 'gpu'
+            interface: 'omp'
+          - lbl: 'frontier'
+            cluster_name: 'Oak Ridge | Frontier'
+            device: 'cpu'
             interface: 'none'
-            lbl: 'frontier'
     runs-on:
       group: phoenix
       labels: ${{ matrix.lbl }}