diff --git a/docs/user-tutorial/benchmarks/micro-benchmarks.md b/docs/user-tutorial/benchmarks/micro-benchmarks.md
index aa3aa965b..6ed8585d5 100644
--- a/docs/user-tutorial/benchmarks/micro-benchmarks.md
+++ b/docs/user-tutorial/benchmarks/micro-benchmarks.md
@@ -166,11 +166,17 @@ Supports the use of double unit types and the use of tensor cores.
 
 #### Metrics
 
-| Name                    | Unit     | Description                                                                        |
-|-------------------------|----------|------------------------------------------------------------------------------------|
-| gpu-burn/time           | time (s) | The runtime for gpu-burn test.                                                     |
-| gpu-burn/gpu_[0-9]_pass | yes/no   | The result of the gpu-burn test for each GPU (1: yes, 0: no).                      |
-| gpu-burn/abort          | yes/no   | Whether or not GPU-burn test aborted before returning GPU results (1: yes, 0: no). |
+| Name                       | Unit            | Description                                                                                                                                                    |
+|----------------------------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| gpu-burn/time              | time (s)        | The runtime for gpu-burn test.                                                                                                                                 |
+| gpu-burn/gpu_[0-9]_pass    | yes/no          | The result of the gpu-burn test for each GPU (1: yes, 0: no).                                                                                                  |
+| gpu-burn/abort             | yes/no          | Whether or not GPU-burn test aborted before returning GPU results (1: yes, 0: no).                                                                             |
+| gpu-burn/gpu_[0-9]+_gflops | FLOPS (GFLOPS)  | GFLOPS measured for each GPU at a single snapshot; the number in the metric name is the snapshot index, which increments with every performance summary line.  |
+| gpu-burn/gpu_[0-9]+_temp   | temperature (C) | Temperature of each GPU at a single snapshot, indexed the same way as the per-snapshot GFLOPS metric.                                                          |
+| gpu-burn/gpu_avg_gflops    | FLOPS (GFLOPS)  | Average GFLOPS across all snapshots for each GPU.                                                                                                              |
+| gpu-burn/gpu_var_gflops    | ratio           | GFLOPS variability for each GPU across snapshots, computed as (max - min) / avg.                                                                               |
+| gpu-burn/gpu_max_temp      | temperature (C) | Maximum observed temperature for each GPU across all snapshots.                                                                                                |
+
 
 ### `cpu-hpl`
 
@@ -271,16 +277,16 @@ Measure the memory bandwidth of GPU using the STREAM benchmark. The benchmark te
 
 #### Metrics
 
-| Metric Name                                                             | Unit             | Description                                                                                                          |
-|-------------------------------------------------------------------------|------------------|----------------------------------------------------------------------------------------------------------------------|
-| STREAM\_COPY\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_bw     | bandwidth (GB/s) | The fp64 memory bandwidth of the GPU for the copy operation with specified buffer size and block size.                |
-| STREAM\_SCALE\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_bw    | bandwidth (GB/s) | The fp64 memory bandwidth of the GPU for the scale operation with specified buffer size and block size.               |
-| STREAM\_ADD\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_bw      | bandwidth (GB/s) | The fp64 memory bandwidth of the GPU for the add operation with specified buffer size and block size.                 |
-| STREAM\_TRIAD\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_bw    | bandwidth (GB/s) | The fp64 memory bandwidth of the GPU for the triad operation with specified buffer size and block size.               |
-| STREAM\_COPY\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_ratio  | Efficiency (%)   | The fp64 memory bandwidth efficiency of the GPU for the copy operation with specified buffer size and block size.     |
-| STREAM\_SCALE\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_ratio | Efficiency (%)   | The fp64 memory bandwidth efficiency of the GPU for the scale operation with specified buffer size and block size. 
| -| STREAM\_ADD\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_ratio | Efficiency (%) | The fp64 memory bandwidth efficiency of the GPU for the add operation with specified buffer size and block size. | -| STREAM\_TRIAD\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_ratio | Efficiency (%) | The fp64 memory bandwidth efficiency of the GPU for the triad operation with specified buffer size and block size. | +| Metric Name | Unit | Description | +|-------------------------------------------------------------------------|------------------|--------------------------------------------------------------------------------------------------------------------| +| STREAM\_COPY\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_bw | bandwidth (GB/s) | The fp64 memory bandwidth of the GPU for the copy operation with specified buffer size and block size. | +| STREAM\_SCALE\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_bw | bandwidth (GB/s) | The fp64 memory bandwidth of the GPU for the scale operation with specified buffer size and block size. | +| STREAM\_ADD\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_bw | bandwidth (GB/s) | The fp64 memory bandwidth of the GPU for the add operation with specified buffer size and block size. | +| STREAM\_TRIAD\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_bw | bandwidth (GB/s) | The fp64 memory bandwidth of the GPU for the triad operation with specified buffer size and block size. | +| STREAM\_COPY\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_ratio | Efficiency (%) | The fp64 memory bandwidth efficiency of the GPU for the copy operation with specified buffer size and block size. | +| STREAM\_SCALE\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_ratio | Efficiency (%) | The fp64 memory bandwidth efficiency of the GPU for the scale operation with specified buffer size and block size. | +| STREAM\_ADD\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_ratio | Efficiency (%) | The fp64 memory bandwidth efficiency of the GPU for the add operation with specified buffer size and block size. | +| STREAM\_TRIAD\_double\_gpu\_[0-9]\_buffer\_[0-9]+\_block\_[0-9]+\_ratio | Efficiency (%) | The fp64 memory bandwidth efficiency of the GPU for the triad operation with specified buffer size and block size. | ### `ib-loopback` @@ -413,72 +419,72 @@ performed by [nvbandwidth](https://github.com/NVIDIA/nvbandwidth) #### Metrics -| Metrics | Unit | Description | -|---------------------------------------------------------|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| host_to_device_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | Host to device CE memcpy using cuMemcpyAsync | -| host_to_device_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | -| device_to_host_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | Device to host CE memcpy using cuMemcpyAsync | -| device_to_host_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | -| host_to_device_bidirectional_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | A host to device copy is measured while a device to host copy is run simultaneously. Only the host to device copy bandwidth is reported. | -| host_to_device_bidirectional_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | -| device_to_host_bidirectional_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | A device to host copy is measured while a host to device copy is run simultaneously. 
Only the device to host copy bandwidth is reported. | -| device_to_host_bidirectional_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | -| device_to_device_memcpy_read_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between each pair of accessible peers. Read tests launch a copy from the peer device to the target using the target's context. | -| device_to_device_memcpy_read_ce_sum_bw | GB/s | Sum of the output matrix | -| device_to_device_memcpy_write_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between each pair of accessible peers. Write tests launch a copy from the target device to the peer using the target's context. | -| device_to_device_memcpy_write_ce_sum_bw | GB/s | Sum of the output matrix | -| device_to_device_bidirectional_memcpy_read_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between each pair of accessible peers. A copy in the opposite direction of the measured copy is run simultaneously but not measured. Read tests launch a copy from the peer device to the target using the target's context. | -| device_to_device_bidirectional_memcpy_read_ce_sum_bw | GB/s | Sum of the output matrix | -| device_to_device_bidirectional_memcpy_write_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between each pair of accessible peers. A copy in the opposite direction of the measured copy is run simultaneously but not measured. Write tests launch a copy from the target device to the peer using the target's context. | -| device_to_device_bidirectional_memcpy_write_ce_sum_bw | GB/s | Sum of the output matrix | -| all_to_host_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between a single device and the host while simultaneously running copies from all other devices to the host. | -| all_to_host_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | -| all_to_host_bidirectional_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | A device to host copy is measured while a host to device copy is run simultaneously. Only the device to host copy bandwidth is reported. All other devices generate simultaneous host to device and device to host interfering traffic. | -| all_to_host_bidirectional_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | -| host_to_all_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between the host to a single device while simultaneously running copies from the host to all other devices. | -| host_to_all_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | -| host_to_all_bidirectional_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | A host to device copy is measured while a device to host copy is run simultaneously. Only the host to device copy bandwidth is reported. All other devices generate simultaneous host to device and device to host interfering traffic. | -| host_to_all_bidirectional_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | -| all_to_one_write_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from all accessible peers to a single device, for each device. Bandwidth is reported as the total inbound bandwidth for each device. Write tests launch a copy from the target device to the peer using the target's context. | -| all_to_one_write_ce_sum_bw | GB/s | Sum of the output matrix | -| all_to_one_read_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from all accessible peers to a single device, for each device. Bandwidth is reported as the total outbound bandwidth for each device. 
Read tests launch a copy from the peer device to the target using the target's context. | -| all_to_one_read_ce_sum_bw | GB/s | Sum of the output matrix | -| one_to_all_write_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from a single device to all accessible peers, for each device. Bandwidth is reported as the total outbound bandwidth for each device. Write tests launch a copy from the target device to the peer using the target's context. | -| one_to_all_write_ce_sum_bw | GB/s | Sum of the output matrix | -| one_to_all_read_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from a single device to all accessible peers, for each device. Bandwidth is reported as the total inbound bandwidth for each device. Read tests launch a copy from the peer device to the target using the target's context. | -| one_to_all_read_ce_sum_bw | GB/s | Sum of the output matrix | -| host_to_device_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | Host to device SM memcpy using a copy kernel | -| host_to_device_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | -| device_to_host_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | Device to host SM memcpy using a copy kernel | -| device_to_host_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | -| device_to_device_memcpy_read_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between each pair of accessible peers. Read tests launch a copy from the peer device to the target using the target's context. | -| device_to_device_memcpy_read_sm_sum_bw | GB/s | Sum of the output matrix | -| device_to_device_memcpy_write_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between each pair of accessible peers. Write tests launch a copy from the target device to the peer using the target's context. | -| device_to_device_memcpy_write_sm_sum_bw | GB/s | Sum of the output matrix | -| device_to_device_bidirectional_memcpy_read_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between each pair of accessible peers. Copies are run in both directions between each pair, and the sum is reported. Read tests launch a copy from the peer device to the target using the target's context. | -| device_to_device_bidirectional_memcpy_read_sm_sum_bw | GB/s | Sum of the output matrix | -| device_to_device_bidirectional_memcpy_write_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between each pair of accessible peers. Copies are run in both directions between each pair, and the sum is reported. Write tests launch a copy from the target device to the peer using the target's context. | -| device_to_device_bidirectional_memcpy_write_sm_sum_bw | GB/s | Sum of the output matrix | -| all_to_host_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between a single device and the host while simultaneously running copies from all other devices to the host. | -| all_to_host_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | -| all_to_host_bidirectional_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | A device to host bandwidth of a copy kernel is measured while a host to device copy is run simultaneously. Only the device to host copy bandwidth is reported. All other devices generate simultaneous host to device and device to host interfering traffic using copy kernels. 
| -| all_to_host_bidirectional_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | -| host_to_all_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between the host to a single device while simultaneously running copies from the host to all other devices. | -| host_to_all_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | -| host_to_all_bidirectional_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | A host to device bandwidth of a copy kernel is measured while a device to host copy is run simultaneously. Only the host to device copy bandwidth is reported. All other devices generate simultaneous host to device and device to host interfering traffic using copy kernels. | -| host_to_all_bidirectional_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | -| all_to_one_write_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from all accessible peers to a single device, for each device. Bandwidth is reported as the total inbound bandwidth for each device. Write tests launch a copy from the target device to the peer using the target's context. | -| all_to_one_write_sm_sum_bw | GB/s | Sum of the output matrix | -| all_to_one_read_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from all accessible peers to a single device, for each device. Bandwidth is reported as the total outbound bandwidth for each device. Read tests launch a copy from the peer device to the target using the target's context. | -| all_to_one_read_sm_sum_bw | GB/s | Sum of the output matrix | -| one_to_all_write_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from a single device to all accessible peers, for each device. Bandwidth is reported as the total outbound bandwidth for each device. Write tests launch a copy from the target device to the peer using the target's context. | -| one_to_all_write_sm_sum_bw | GB/s | Sum of the output matrix | -| one_to_all_read_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from a single device to all accessible peers, for each device. Bandwidth is reported as the total inbound bandwidth for each device. Read tests launch a copy from the peer device to the target using the target's context. | -| one_to_all_read_sm_sum_bw | GB/s | Sum of the output matrix | -| host_device_latency_sm_cpu[0-9]_gpu[0-9]_lat | µs | Host - device SM copy latency using a ptr chase kernel | -| host_device_latency_sm_sum_lat | µs | Sum of the output matrix | -| device_to_device_latency_sm_gpu[0-9]_gpu[0-9]_lat | µs | Measures latency of a pointer dereference operation between each pair of accessible peers. Memory is allocated on a GPU and is accessed by the peer GPU to determine latency. 
| -| device_to_device_latency_sm_sum_lat | µs | Sum of the output matrix | +| Metrics | Unit | Description | +|---------------------------------------------------------------------|------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| host_to_device_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | Host to device CE memcpy using cuMemcpyAsync | +| host_to_device_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | +| device_to_host_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | Device to host CE memcpy using cuMemcpyAsync | +| device_to_host_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | +| host_to_device_bidirectional_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | A host to device copy is measured while a device to host copy is run simultaneously. Only the host to device copy bandwidth is reported. | +| host_to_device_bidirectional_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | +| device_to_host_bidirectional_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | A device to host copy is measured while a host to device copy is run simultaneously. Only the device to host copy bandwidth is reported. | +| device_to_host_bidirectional_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | +| device_to_device_memcpy_read_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between each pair of accessible peers. Read tests launch a copy from the peer device to the target using the target's context. | +| device_to_device_memcpy_read_ce_sum_bw | GB/s | Sum of the output matrix | +| device_to_device_memcpy_write_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between each pair of accessible peers. Write tests launch a copy from the target device to the peer using the target's context. | +| device_to_device_memcpy_write_ce_sum_bw | GB/s | Sum of the output matrix | +| device_to_device_bidirectional_memcpy_read_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between each pair of accessible peers. A copy in the opposite direction of the measured copy is run simultaneously but not measured. Read tests launch a copy from the peer device to the target using the target's context. | +| device_to_device_bidirectional_memcpy_read_ce_sum_bw | GB/s | Sum of the output matrix | +| device_to_device_bidirectional_memcpy_write_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between each pair of accessible peers. A copy in the opposite direction of the measured copy is run simultaneously but not measured. Write tests launch a copy from the target device to the peer using the target's context. | +| device_to_device_bidirectional_memcpy_write_ce_sum_bw | GB/s | Sum of the output matrix | +| all_to_host_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between a single device and the host while simultaneously running copies from all other devices to the host. | +| all_to_host_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | +| all_to_host_bidirectional_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | A device to host copy is measured while a host to device copy is run simultaneously. Only the device to host copy bandwidth is reported. All other devices generate simultaneous host to device and device to host interfering traffic. 
| +| all_to_host_bidirectional_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | +| host_to_all_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of cuMemcpyAsync between the host to a single device while simultaneously running copies from the host to all other devices. | +| host_to_all_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | +| host_to_all_bidirectional_memcpy_ce_cpu[0-9]_gpu[0-9]_bw | GB/s | A host to device copy is measured while a device to host copy is run simultaneously. Only the host to device copy bandwidth is reported. All other devices generate simultaneous host to device and device to host interfering traffic. | +| host_to_all_bidirectional_memcpy_ce_sum_bw | GB/s | Sum of the output matrix | +| all_to_one_write_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from all accessible peers to a single device, for each device. Bandwidth is reported as the total inbound bandwidth for each device. Write tests launch a copy from the target device to the peer using the target's context. | +| all_to_one_write_ce_sum_bw | GB/s | Sum of the output matrix | +| all_to_one_read_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from all accessible peers to a single device, for each device. Bandwidth is reported as the total outbound bandwidth for each device. Read tests launch a copy from the peer device to the target using the target's context. | +| all_to_one_read_ce_sum_bw | GB/s | Sum of the output matrix | +| one_to_all_write_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from a single device to all accessible peers, for each device. Bandwidth is reported as the total outbound bandwidth for each device. Write tests launch a copy from the target device to the peer using the target's context. | +| one_to_all_write_ce_sum_bw | GB/s | Sum of the output matrix | +| one_to_all_read_ce_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from a single device to all accessible peers, for each device. Bandwidth is reported as the total inbound bandwidth for each device. Read tests launch a copy from the peer device to the target using the target's context. | +| one_to_all_read_ce_sum_bw | GB/s | Sum of the output matrix | +| host_to_device_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | Host to device SM memcpy using a copy kernel | +| host_to_device_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | +| device_to_host_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | Device to host SM memcpy using a copy kernel | +| device_to_host_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | +| device_to_device_memcpy_read_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between each pair of accessible peers. Read tests launch a copy from the peer device to the target using the target's context. | +| device_to_device_memcpy_read_sm_sum_bw | GB/s | Sum of the output matrix | +| device_to_device_memcpy_write_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between each pair of accessible peers. Write tests launch a copy from the target device to the peer using the target's context. | +| device_to_device_memcpy_write_sm_sum_bw | GB/s | Sum of the output matrix | +| device_to_device_bidirectional_memcpy_read_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between each pair of accessible peers. Copies are run in both directions between each pair, and the sum is reported. Read tests launch a copy from the peer device to the target using the target's context. 
| +| device_to_device_bidirectional_memcpy_read_sm_sum_bw | GB/s | Sum of the output matrix | +| device_to_device_bidirectional_memcpy_write_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between each pair of accessible peers. Copies are run in both directions between each pair, and the sum is reported. Write tests launch a copy from the target device to the peer using the target's context. | +| device_to_device_bidirectional_memcpy_write_sm_sum_bw | GB/s | Sum of the output matrix | +| all_to_host_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between a single device and the host while simultaneously running copies from all other devices to the host. | +| all_to_host_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | +| all_to_host_bidirectional_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | A device to host bandwidth of a copy kernel is measured while a host to device copy is run simultaneously. Only the device to host copy bandwidth is reported. All other devices generate simultaneous host to device and device to host interfering traffic using copy kernels. | +| all_to_host_bidirectional_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | +| host_to_all_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | Measures bandwidth of a copy kernel between the host to a single device while simultaneously running copies from the host to all other devices. | +| host_to_all_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | +| host_to_all_bidirectional_memcpy_sm_cpu[0-9]_gpu[0-9]_bw | GB/s | A host to device bandwidth of a copy kernel is measured while a device to host copy is run simultaneously. Only the host to device copy bandwidth is reported. All other devices generate simultaneous host to device and device to host interfering traffic using copy kernels. | +| host_to_all_bidirectional_memcpy_sm_sum_bw | GB/s | Sum of the output matrix | +| all_to_one_write_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from all accessible peers to a single device, for each device. Bandwidth is reported as the total inbound bandwidth for each device. Write tests launch a copy from the target device to the peer using the target's context. | +| all_to_one_write_sm_sum_bw | GB/s | Sum of the output matrix | +| all_to_one_read_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from all accessible peers to a single device, for each device. Bandwidth is reported as the total outbound bandwidth for each device. Read tests launch a copy from the peer device to the target using the target's context. | +| all_to_one_read_sm_sum_bw | GB/s | Sum of the output matrix | +| one_to_all_write_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from a single device to all accessible peers, for each device. Bandwidth is reported as the total outbound bandwidth for each device. Write tests launch a copy from the target device to the peer using the target's context. | +| one_to_all_write_sm_sum_bw | GB/s | Sum of the output matrix | +| one_to_all_read_sm_gpu[0-9]_gpu[0-9]_bw | GB/s | Measures the total bandwidth of copies from a single device to all accessible peers, for each device. Bandwidth is reported as the total inbound bandwidth for each device. Read tests launch a copy from the peer device to the target using the target's context. 
|
+| one_to_all_read_sm_sum_bw                                            | GB/s | Sum of the output matrix                                                                                                                                                       |
+| host_device_latency_sm_cpu[0-9]_gpu[0-9]_lat                         | µs   | Host - device SM copy latency using a ptr chase kernel                                                                                                                         |
+| host_device_latency_sm_sum_lat                                       | µs   | Sum of the output matrix                                                                                                                                                       |
+| device_to_device_latency_sm_gpu[0-9]_gpu[0-9]_lat                    | µs   | Measures latency of a pointer dereference operation between each pair of accessible peers. Memory is allocated on a GPU and is accessed by the peer GPU to determine latency.  |
+| device_to_device_latency_sm_sum_lat                                  | µs   | Sum of the output matrix                                                                                                                                                       |
 
 ## Computation-communication Benchmarks
 
diff --git a/superbench/benchmarks/micro_benchmarks/gpu_burn_test.py b/superbench/benchmarks/micro_benchmarks/gpu_burn_test.py
index fba4ad2b3..7e7ca6378 100644
--- a/superbench/benchmarks/micro_benchmarks/gpu_burn_test.py
+++ b/superbench/benchmarks/micro_benchmarks/gpu_burn_test.py
@@ -4,6 +4,7 @@
 """Module of the GPU-Burn Test."""
 
 import os
+import re
 
 from superbench.common.utils import logger
 from superbench.benchmarks import BenchmarkRegistry, Platform
@@ -45,6 +46,12 @@ def add_parser_arguments(self):
             default=10,
             help='Length of time to run GPU-Burn for(in seconds)',
         )
+        self._parser.add_argument(
+            '--warmup_iters',
+            type=int,
+            default=0,
+            help='Number of warmup iterations before performance measurement',
+        )
 
     def _preprocess(self):
         """Preprocess/preparation operations before the benchmarking.
@@ -88,7 +95,9 @@ def _process_raw_result(self, cmd_idx, raw_output):    # noqa: C901
         abort = False
         failure_msg = 'unknown failure'
         index = -1
+
         try:
+            # detect fatal failure lines
             for idx, line in enumerate(content):
                 if 'No clients are alive!' in line or "Couldn't init a GPU" \
                    in line or 'Failure during compute' in line or 'Low mem for result' in line:
@@ -124,6 +133,61 @@ def _process_raw_result(self, cmd_idx, raw_output):    # noqa: C901
                 self._result.add_raw_data('GPU Burn Failure: ', failure_msg, self._args.log_raw_data)
                 self._result.add_result('abort', 1)
                 return False
+
+            # Parse and emit metrics for every perf snapshot
+            # Find all performance snapshot lines containing Gflop/s
+            perf_lines = [line for line in raw_output.splitlines() if 'Gflop/s' in line]
+            per_gpu_flops, per_gpu_temps = {}, {}
+            num_gpus = 0
+            for snap_idx, perf_line in enumerate(perf_lines):
+                # extract per-GPU Gflops values like '(581623 Gflop/s)'
+                gflops = re.findall(r'\(([0-9]+(?:\.[0-9]+)?)\s*Gflop/s\)', perf_line)
+                gflops = [float(x) for x in gflops]
+                # extract temps: 'temps: 48 C - 49 C - 49 C - 49 C'
+                temps = []
+                temp_match = re.search(r'temps:\s*(.+)$', perf_line)
+                if temp_match:
+                    temps = []
+                    for t in temp_match.group(1).split(' - '):
+                        match = re.search(r'(\d+)', t)
+                        if match:
+                            temps.append(int(match.group(1)))
+
+                # Save snapshot raw line
+                self._result.add_raw_data(f'GPU-Burn_perf_snapshot_{snap_idx}', perf_line, self._args.log_raw_data)
+
+                # Emit per-GPU metrics for this snapshot
+                num_gpus = max(len(gflops), len(temps), num_gpus)
+                for i in range(num_gpus):
+                    if i not in per_gpu_flops:
+                        per_gpu_flops[i] = []
+                    if i not in per_gpu_temps:
+                        per_gpu_temps[i] = []
+                    if i < len(gflops) and gflops[i] > 0:
+                        self._result.add_result(f'gpu_{snap_idx}_gflops:{i}', gflops[i])
+                        if snap_idx > self._args.warmup_iters:
+                            per_gpu_flops[i].append(gflops[i])
+                    else:
+                        self._result.add_result(f'gpu_{snap_idx}_gflops:{i}', 0.0)
+                    if i < len(temps):
+                        self._result.add_result(f'gpu_{snap_idx}_temp:{i}', temps[i])
+                        per_gpu_temps[i].append(temps[i])
+                    else:
+                        self._result.add_result(f'gpu_{snap_idx}_temp:{i}', -1)
+            for i in per_gpu_flops:
+                if len(per_gpu_flops[i]) > 0:
+                    avg_flops = sum(per_gpu_flops[i]) / len(per_gpu_flops[i])
+                    self._result.add_result(f'gpu_avg_gflops:{i}', avg_flops)
+                    if avg_flops != 0:
+                        self._result.add_result(
+                            f'gpu_var_gflops:{i}', (max(per_gpu_flops[i]) - min(per_gpu_flops[i])) / avg_flops
+                        )
+                    else:
+                        self._result.add_result(f'gpu_var_gflops:{i}', 0.0)
+            for i in per_gpu_temps:
+                if len(per_gpu_temps[i]) > 0:
+                    self._result.add_result(f'gpu_max_temp:{i}', max(per_gpu_temps[i]))
+
         except BaseException as e:
             logger.error(
                 'The result format is invalid - round: {}, benchmark: {}, raw output: {}, message: {}.'.format(
diff --git a/tests/benchmarks/micro_benchmarks/test_gpu_burn_test.py b/tests/benchmarks/micro_benchmarks/test_gpu_burn_test.py
index 3ec352c4d..82454ee53 100644
--- a/tests/benchmarks/micro_benchmarks/test_gpu_burn_test.py
+++ b/tests/benchmarks/micro_benchmarks/test_gpu_burn_test.py
@@ -29,7 +29,7 @@ def test_gpu_burn(self, results):
 
         time = 10
 
-        parameters = '--doubles --tensor_core --time ' + str(time)
+        parameters = '--doubles --tensor_core --warmup_iters 128 --time ' + str(time)
         benchmark = benchmark_class(benchmark_name, parameters=parameters)
 
         # Check basic information
@@ -57,4 +57,12 @@ def test_gpu_burn(self, results):
         assert (benchmark.result['time'][0] == time)
         for device in range(8):
             assert (benchmark.result['gpu_' + str(device) + '_pass'][0] == 1)
+            assert ('gpu_max_temp:' + str(device) in benchmark.result)
+            assert (benchmark.result['gpu_max_temp:' + str(device)][0] >= 50)
+            assert ('gpu_avg_gflops:' + str(device) in benchmark.result)
+            assert (benchmark.result['gpu_avg_gflops:' + str(device)][0] >= 16000)
+            assert ('gpu_var_gflops:' + str(device) in benchmark.result)
+            assert (benchmark.result['gpu_var_gflops:' + str(device)][0] <= 0.01)
+            assert ('gpu_195_gflops:' + str(device) in benchmark.result)
+            assert ('gpu_195_temp:' + str(device) in benchmark.result)
         assert (benchmark.result['abort'][0] == 0)
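Reviewer note: the snippet below is a minimal, self-contained sketch of what the two regular expressions added in `_process_raw_result` extract from a single gpu_burn progress line. The sample `perf_line` is an assumed, illustrative output format (gpu_burn-style `(NNN Gflop/s)` groups plus a `temps: NN C - ...` suffix); real tool output may differ in its surrounding fields.

```python
import re

# Hypothetical gpu_burn progress line for two GPUs (illustrative only, not captured from a real run).
perf_line = '50.0%  proc: 1024  errors: 0  (581623 Gflop/s) - (579870 Gflop/s)  temps: 48 C - 49 C'

# Per-GPU GFLOPS: the numbers inside '(... Gflop/s)' groups, same pattern as in _process_raw_result.
gflops = [float(x) for x in re.findall(r'\(([0-9]+(?:\.[0-9]+)?)\s*Gflop/s\)', perf_line)]

# Per-GPU temperatures: the text after 'temps:', split on ' - ', keeping the leading integer of each entry.
temps = []
temp_match = re.search(r'temps:\s*(.+)$', perf_line)
if temp_match:
    for entry in temp_match.group(1).split(' - '):
        digits = re.search(r'(\d+)', entry)
        if digits:
            temps.append(int(digits.group(1)))

print(gflops)    # [581623.0, 579870.0]
print(temps)     # [48, 49]
```

Each such snapshot produces one `gpu_<snap_idx>_gflops:<gpu>` and one `gpu_<snap_idx>_temp:<gpu>` result; snapshots with an index greater than `--warmup_iters` feed the `gpu_avg_gflops` and `gpu_var_gflops` ((max - min) / avg) aggregates, while `gpu_max_temp` is taken over all snapshots.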