
Commit 7281d65

test(simulation): add more unit and functional tests based on the template

1 parent b7df057 commit 7281d65
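
To run only the files touched by this commit, here is a minimal sketch using pytest's Python entry point (a hypothetical helper, not part of this commit; assumes pytest and the project dependencies are installed and that it is invoked from the repository root):

    # run_new_tests.py - hypothetical convenience script
    import pytest

    # pytest.main() accepts the same arguments as the pytest CLI;
    # -v gives per-test output for the two files changed here.
    pytest.main(["-v", "tests/test_functionaltest.py", "tests/test_unittest.py"])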

File tree

2 files changed: +304 −0 lines changed

tests/test_functionaltest.py
tests/test_unittest.py

tests/test_functionaltest.py

Lines changed: 239 additions & 0 deletions
@@ -5,11 +5,15 @@
 functionality.
 """
 
+from joblib import cpu_count
+import numpy as np
+import pandas as pd
 import pytest
 
 from simulation.parameters import (ASUArrivals, RehabArrivals, ASULOS,
                                    RehabLOS, Param)
 from simulation.model import Model
+from simulation.runner import Runner
 
 
 @pytest.mark.parametrize("warm_up_period", [(0), (1)])
@@ -117,3 +121,238 @@ def test_long_los(stroke_no_esd_mean):
 
 
 # TODO: test_ev_5/6/7/8 - requires patient counts by unit and patient type
+
+
+def test_warmup_only():
+    """
+    Check that results are as expected if model is run with no data collection
+    period, and only a warm-up period.
+
+    Notes
+    -----
+    Inspired by `test_warmup_only` in
+    github.com/pythonhealthdatascience/rap_template_python_des/.
+    """
+    # Run with warm-up period only
+    param = Param(warm_up_period=500, data_collection_period=0)
+    model = Model(param=param, run_number=0)
+    model.run()
+
+    # Check that there are no entries for patients or audit list
+    assert len(model.patients) == 0
+    assert len(model.audit_list) == 0
+
+    # But that current occupancy is positive, as it reflects patients who
+    # arrived during warm-up still occupying space
+    assert model.asu_occupancy > 0
+    assert model.rehab_occupancy > 0
+
+
+def test_warmup_impact():
+    """
+    Check that running with warm-up leads to different results than without.
+
+    Notes
+    -----
+    Adapted from `test_warmup_impact` in
+    github.com/pythonhealthdatascience/rap_template_python_des/.
+    """
+    def helper_warmup(warm_up_period):
+        """
+        Helper function to run model with high arrivals and specified warm-up.
+
+        Arguments:
+            warm_up_period (float):
+                Duration of the warm-up period - running simulation but not
+                yet collecting results.
+        """
+        param = Param(warm_up_period=warm_up_period,
+                      data_collection_period=1500)
+        model = Model(param=param, run_number=0)
+        model.run()
+        return model
+
+    # Run model with and without warm-up period
+    model_warmup = helper_warmup(warm_up_period=500)
+    model_none = helper_warmup(warm_up_period=0)
+
+    # Extract result of first patient
+    first_warmup = model_warmup.patients[0]
+    first_none = model_none.patients[0]
+
+    # Check that model with warm-up has arrival time later than warm-up length
+    assert first_warmup.asu_arrival_time > 500, (
+        "Expect first patient to arrive after time 500 when model is run " +
+        f"with warm-up (length 500), but got {first_warmup.asu_arrival_time}."
+    )
+
+    # Check that model without warm-up has its first arrival after time 0
+    assert first_none.asu_arrival_time > 0, (
+        "Expect first patient to arrive after time 0 when model is run " +
+        f"without warm-up, but got {first_none.asu_arrival_time}."
+    )
+
+    # Check that the first interval audit entry with no warm-up has occupancy
+    # 0, but first entry with warm-up has occupancy > 0
+    assert model_warmup.audit_list[0]["asu_occupancy"] > 0
+    assert model_warmup.audit_list[0]["rehab_occupancy"] > 0
+    assert model_none.audit_list[0]["asu_occupancy"] == 0
+    assert model_none.audit_list[0]["rehab_occupancy"] == 0
+
+    # Check time of first entry in the audit lists
+    assert model_warmup.audit_list[0]["time"] == 500
+    assert model_none.audit_list[0]["time"] == 0
+
+
+def test_changing_occupancy():
+    """
+    Test that adjusting parameters alters the observed occupancy.
+
+    Notes
+    -----
+    Inspired by `test_arrivals_decrease` in
+    github.com/pythonhealthdatascience/rap_template_python_des/.
+    """
+    # Run model with lots of stroke arrivals (IAT 1) and fewer (IAT 100)
+    initial_model = Model(Param(ASUArrivals(stroke=1)), run_number=0)
+    initial_model.run()
+    adj_model = Model(Param(ASUArrivals(stroke=100)), run_number=0)
+    adj_model.run()
+
+    # Check that patient list is longer with more arrivals
+    i_n = len(initial_model.patients)
+    a_n = len(adj_model.patients)
+    assert i_n > a_n, (
+        "Expect low IAT to have more patients than high IAT, but " +
+        f"saw {i_n} for low and {a_n} for high"
+    )
+
+    # Check that sum of the audit occupancies is higher with more arrivals
+    initial_audit = pd.DataFrame(initial_model.audit_list)
+    adj_audit = pd.DataFrame(adj_model.audit_list)
+
+    i_asu = initial_audit["asu_occupancy"].sum()
+    a_asu = adj_audit["asu_occupancy"].sum()
+    assert i_asu > a_asu, (
+        "Expect low IAT to have higher ASU occupancy than high IAT, but " +
+        f"saw {i_asu} for low and {a_asu} for high")
+
+    i_reh = initial_audit["rehab_occupancy"].sum()
+    a_reh = adj_audit["rehab_occupancy"].sum()
+    assert i_reh > a_reh, (
+        "Expect low IAT to have higher rehab occupancy than high IAT, but " +
+        f"saw {i_reh} for low and {a_reh} for high")
+
+
+def test_seed_stability():
+    """
+    Check that two runs using the same random seed return the same results.
+
+    Notes
+    -----
+    Adapted from `test_seed_stability` in
+    github.com/pythonhealthdatascience/rap_template_python_des/.
+    """
+    # Run model twice, with same run number (and therefore same seed) each time
+    runner1 = Runner(param=Param())
+    result1 = runner1.run_single(run=33)
+    runner2 = Runner(param=Param())
+    result2 = runner2.run_single(run=33)
+
+    # Check that the dataframes are equal
+    pd.testing.assert_frame_equal(result1["asu"], result2["asu"])
+    pd.testing.assert_frame_equal(result1["rehab"], result2["rehab"])
+
+
+def test_parallel():
+    """
+    Check that sequential and parallel execution produce consistent results.
+
+    Notes
+    -----
+    Adapted from `test_parallel` in
+    github.com/pythonhealthdatascience/rap_template_python_des/.
+    """
+    # Sequential (1 core) and parallel (-1 cores) execution
+    results = {}
+    for mode, cores in [("seq", 1), ("par", -1)]:
+        param = Param(cores=cores)
+        runner = Runner(param)
+        results[mode] = runner.run_single(run=0)
+
+    # Verify results are identical
+    pd.testing.assert_frame_equal(results["seq"]["asu"],
+                                  results["par"]["asu"])
+    pd.testing.assert_frame_equal(results["seq"]["rehab"],
+                                  results["par"]["rehab"])
+
+
+@pytest.mark.parametrize("cores", [
+    (-2), (0), (cpu_count()), (cpu_count()+1)
+])
+def test_valid_cores(cores):
+    """
+    Check there is error handling for an invalid number of cores.
+
+    Notes
+    -----
+    Copied from github.com/pythonhealthdatascience/rap_template_python_des/.
+    """
+    param = Param(cores=cores)
+    runner = Runner(param)
+    with pytest.raises(ValueError):
+        runner.run_reps()
+
+
+def test_sampled_distributions():
+    """
+    Ensure that the means of sampled values from arrival_dist, los_dist, and
+    routing_dist are close to their expected values.
+
+    Notes
+    -----
+    Adapted from `test_sampled_times` in
+    github.com/pythonhealthdatascience/rap_template_python_des/.
+    """
+    param = Param()
+    model = Model(param, run_number=0)
+
+    # Test arrival_dist
+    for unit in ['asu', 'rehab']:
+        for patient_type, dist in model.arrival_dist[unit].items():
+            samples = [dist.sample() for _ in range(10000)]
+            observed_mean = np.mean(samples)
+            expected_mean = getattr(
+                getattr(param, f"{unit}_arrivals"), patient_type)
+            assert np.isclose(observed_mean, expected_mean, rtol=0.05), (
+                f"Expected mean arrival time for {unit} {patient_type} ≈ "
+                f"{expected_mean}, but got {observed_mean}.")
+
+    # Test los_dist
+    for unit in ['asu', 'rehab']:
+        for patient_type, dist in model.los_dist[unit].items():
+            samples = [dist.sample() for _ in range(10000)]
+            observed_mean = np.mean(samples)
+            expected_mean = getattr(
+                getattr(param, f"{unit}_los"), patient_type)["mean"]
+            assert np.isclose(observed_mean, expected_mean, rtol=0.05), (
+                f"Expected mean LOS for {unit} {patient_type} ≈ "
+                f"{expected_mean}, but got {observed_mean}.")
+
+    # Test routing_dist
+    for unit in ['asu', 'rehab']:
+        for patient_type, dist in model.routing_dist[unit].items():
+            samples = [dist.sample() for _ in range(10000)]
+            observed_probs = {dest: samples.count(dest) / len(samples)
+                              for dest in set(samples)}
+            expected_probs = getattr(
+                getattr(param, f"{unit}_routing"), patient_type)
+            for dest in expected_probs:
+                # If a destination has a very low probability, it might not
+                # appear in samples. In that case, set the observed
+                # probability to 0
+                observed_prob = observed_probs.get(dest, 0)
+                assert np.isclose(observed_prob,
+                                  expected_probs[dest], atol=0.05), (
+                    f"Expected routing probability for {unit} {patient_type} "
+                    f"to {dest} ≈ {expected_probs[dest]}, but got "
+                    f"{observed_prob}.")

tests/test_unittest.py

Lines changed: 65 additions & 0 deletions
@@ -6,6 +6,11 @@
 work as intended.
 """
 
+from io import StringIO
+import logging
+import os
+from unittest.mock import patch, MagicMock
+
 import numpy as np
 import pytest
 from sim_tools.distributions import Exponential, Lognormal, Discrete
@@ -15,6 +20,7 @@
                                    ASURouting, RehabRouting, Param)
 from simulation.model import Model
 from simulation.runner import Runner
+from simulation.logging import SimLogger
 
 
 # -----------------------------------------------------------------------------
@@ -243,3 +249,62 @@ def test_get_occupancy_freq():
 
     # Check prob_delay calculation
     assert np.allclose(result_df["prob_delay"], expected_prob_delay)
+
+
+# -----------------------------------------------------------------------------
+# SimLogger
+# -----------------------------------------------------------------------------
+
+def test_log_to_console():
+    """
+    Confirm that logger.log() prints the provided message to the console.
+
+    Notes
+    -----
+    Test from github.com/pythonhealthdatascience/rap_template_python_des.
+    """
+    with patch('sys.stdout', new_callable=StringIO) as mock_stdout:
+        logger = SimLogger(log_to_console=True)
+        logger.log(sim_time=None, msg="Test console log")
+        # Check if console output matches
+        assert "Test console log" in mock_stdout.getvalue()
+
+
+def test_log_to_file():
+    """
+    Confirm that logger.log() would output the message to a .log file at the
+    provided file path.
+
+    Notes
+    -----
+    Test from github.com/pythonhealthdatascience/rap_template_python_des.
+    """
+    # Mock the file open operation
+    with patch('builtins.open', new_callable=MagicMock) as mock_open:
+        # Create the logger and log a simple example
+        logger = SimLogger(log_to_file=True, file_path="test.log")
+        logger.log(sim_time=None, msg="Log message")
+
+        # Check that the file was opened in write mode at the absolute path
+        mock_open.assert_called_with(
+            os.path.abspath("test.log"), "w", encoding="locale", errors=None)
+
+        # Verify a FileHandler is attached to the logger
+        assert any(isinstance(handler, logging.FileHandler)
+                   for handler in logger.logger.handlers)
+
+
+def test_invalid_path():
+    """
+    Ensure there is appropriate error handling for an invalid file path.
+    """
+    with pytest.raises(ValueError):
+        SimLogger(log_to_file=True, file_path="/invalid/path/to/log.log")
+
+
+def test_invalid_file_extension():
+    """
+    Ensure there is appropriate error handling for an invalid file extension.
+    """
+    with pytest.raises(ValueError):
+        SimLogger(log_to_file=True, file_path="test.txt")
