Skip to content

Commit ea3d4fc

Browse files
Merge pull request #203 from KernelTuner/skip_nvml_tests
Skip nvml tests
2 parents a6f7148 + 04548ca commit ea3d4fc

File tree

3 files changed

+11
-2
lines changed

3 files changed

+11
-2
lines changed

test/context.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,12 @@
1111
except Exception:
1212
pycuda_present = False
1313

14+
try:
15+
import pynvml
16+
pynvml_present = True
17+
except ImportError:
18+
pynvml_present = False
19+
1420
try:
1521
import pyopencl
1622
opencl_present = True
@@ -39,6 +45,7 @@
3945
cuda_present = False
4046

4147
skip_if_no_pycuda = pytest.mark.skipif(not pycuda_present, reason="PyCuda not installed or no CUDA device detected")
48+
skip_if_no_pynvml = pytest.mark.skipif(not pynvml_present, reason="NVML not installed")
4249
skip_if_no_cupy = pytest.mark.skipif(not cupy_present, reason="CuPy not installed or no CUDA device detected")
4350
skip_if_no_cuda = pytest.mark.skipif(not cuda_present, reason="NVIDIA CUDA not installed")
4451
skip_if_no_opencl = pytest.mark.skipif(not opencl_present, reason="PyOpenCL not installed or no OpenCL device detected")

test/test_energy.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,12 @@
11
import os
2-
from .context import skip_if_no_pycuda
2+
from .context import skip_if_no_pycuda, skip_if_no_pynvml
33
from kernel_tuner.energy import energy
44

55

66
cache_filename = os.path.dirname(os.path.realpath(__file__)) + "/synthetic_fp32_cache_NVIDIA_RTX_A4000.json"
77

88
@skip_if_no_pycuda
9+
@skip_if_no_pynvml
910
def test_create_power_frequency_model():
1011

1112
ridge_frequency, freqs, nvml_power, fitted_params, scaling = energy.create_power_frequency_model(cache=cache_filename, simulation_mode=True)

test/test_observers.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,12 @@
55
from kernel_tuner.observers.nvml import NVMLObserver
66
from kernel_tuner.observers.observer import BenchmarkObserver
77

8-
from .context import skip_if_no_pycuda
8+
from .context import skip_if_no_pycuda, skip_if_no_pynvml
99
from .test_runners import env
1010

1111

1212
@skip_if_no_pycuda
13+
@skip_if_no_pynvml
1314
def test_nvml_observer(env):
1415
nvmlobserver = NVMLObserver(["nvml_energy", "temperature"])
1516
env[-1]["block_size_x"] = [128]

0 commit comments

Comments (0)