|
| 1 | +""" |
| 2 | +pytest configuration for the PythonBPF test suite. |
| 3 | +
|
| 4 | +Test discovery: |
| 5 | + All .py files under tests/passing_tests/ and tests/failing_tests/ are |
| 6 | + collected as parametrized BPF test cases. |
| 7 | +
|
| 8 | +Markers applied automatically from test_config.toml: |
| 9 | + - xfail (strict=True): failing_tests/ entries that are expected to fail |
| 10 | + - skip: vmlinux tests when vmlinux.py is not importable |
| 11 | +
|
| 12 | +Run the suite: |
| 13 | + pytest tests/ -v -m "not verifier" # IR + LLC only (no sudo) |
| 14 | + pytest tests/ -v --cov=pythonbpf # with coverage |
| 15 | + pytest tests/test_verifier.py -m verifier # kernel verifier (sudo required) |
| 16 | +""" |
| 17 | + |
| 18 | +import logging |
| 19 | + |
| 20 | +import pytest |
| 21 | + |
| 22 | +from tests.framework.collector import collect_all_test_files |
| 23 | + |
| 24 | +# ── vmlinux availability ──────────────────────────────────────────────────── |
| 25 | + |
# Probe for the generated vmlinux.py bindings: importable only when the
# bindings for the running kernel exist on the import path.
try:
    import vmlinux  # noqa: F401
except ImportError:
    VMLINUX_AVAILABLE = False
else:
    VMLINUX_AVAILABLE = True
| 32 | + |
| 33 | + |
| 34 | +# ── shared fixture: collected test cases ─────────────────────────────────── |
| 35 | + |
| 36 | + |
def _all_cases():
    """Return every collected BPF test case, caching after the first walk.

    ``pytest_generate_tests`` calls this once per test function that uses the
    ``bpf_test_file`` fixture, and ``pytest_collection_modifyitems`` calls it
    again; memoizing avoids re-scanning the test directories on every call.
    The set of test files is static within a single pytest run, so the cache
    is safe.
    """
    cached = getattr(_all_cases, "_cache", None)
    if cached is None:
        cached = collect_all_test_files()
        _all_cases._cache = cached
    return cached
| 39 | + |
| 40 | + |
| 41 | +# ── pytest_generate_tests: parametrize on bpf_test_file ─────────────────── |
| 42 | + |
| 43 | + |
def pytest_generate_tests(metafunc):
    """Parametrize tests requesting ``bpf_test_file`` over all collected cases.

    Each case contributes its filesystem path as the parameter value and its
    repo-relative path as the human-readable test ID.
    """
    if "bpf_test_file" not in metafunc.fixturenames:
        return
    cases = _all_cases()
    paths = [case.path for case in cases]
    labels = [case.rel_path for case in cases]
    metafunc.parametrize("bpf_test_file", paths, ids=labels)
| 52 | + |
| 53 | + |
| 54 | +# ── pytest_collection_modifyitems: apply xfail / skip markers ───────────── |
| 55 | + |
| 56 | + |
def pytest_collection_modifyitems(items):
    """Apply skip / xfail markers to collected BPF test items.

    - skip: vmlinux-dependent cases when vmlinux.py is not importable.
    - xfail (strict=True): expected-fail cases, scoped by failure level —
      level "ir" xfails every test for the case, level "llc" only the LLC
      compilation test.
    """
    case_map = {c.rel_path: c for c in _all_cases()}

    for item in items:
        # The parametrize ID embedded in the node id is the case's rel_path.
        # Node id format: tests/test_foo.py::test_bar[passing_tests/helpers/pid.py]
        # Non-parametrized items have no callspec and are left untouched.
        callspec = getattr(item, "callspec", None)
        case = case_map.get(callspec.id) if callspec is not None else None
        if case is None:
            continue

        # vmlinux skip: these cases cannot compile without kernel type info.
        if case.needs_vmlinux and not VMLINUX_AVAILABLE:
            item.add_marker(
                pytest.mark.skip(reason="vmlinux.py not available for current kernel")
            )
            continue

        # xfail (strict: XPASS counts as a test failure, alerting us to fixed bugs)
        if case.is_expected_fail:
            # Level "ir" → fails at IR generation: xfail both IR and LLC tests
            # Level "llc" → IR succeeds but LLC fails: only xfail the LLC test
            is_llc_test = item.nodeid.startswith("tests/test_llc_compilation.py")

            apply_xfail = (case.xfail_level == "ir") or (
                case.xfail_level == "llc" and is_llc_test
            )
            if apply_xfail:
                item.add_marker(
                    pytest.mark.xfail(
                        reason=case.xfail_reason,
                        strict=True,
                        raises=Exception,
                    )
                )
| 95 | + |
| 96 | + |
| 97 | +# ── caplog level fixture: capture ERROR+ from pythonbpf ─────────────────── |
| 98 | + |
| 99 | + |
@pytest.fixture(autouse=True)
def set_log_level(caplog):
    """Capture ERROR-and-above records from the ``pythonbpf`` logger.

    Autouse: applied to every test, so assertions on compiler error output
    via ``caplog`` behave uniformly across the suite. The level is restored
    when the ``with`` block exits at test teardown.
    """
    with caplog.at_level(logging.ERROR, logger="pythonbpf"):
        yield
0 commit comments