diff --git a/docs/source/io_formats/settings_statepoint_latest.rst b/docs/source/io_formats/settings_statepoint_latest.rst
new file mode 100644
index 00000000000..7e46b30f004
--- /dev/null
+++ b/docs/source/io_formats/settings_statepoint_latest.rst
@@ -0,0 +1,77 @@
+.. _usersguide_statepoint_latest:
+
+Running Statepoints
+===================
+
+OpenMC can be configured to write *running* statepoint files that capture the
+state of the most recently completed batches. This is useful when you want
+access to results from the most recent batches even if a simulation is
+interrupted unexpectedly, allowing easy restart or analysis.
+
+To enable this feature, specify a negative batch number in the ``<batches>``
+element within ``<state_point>``. A value of ``-N`` means "keep the last N
+completed batches as running statepoint files."
+
+Examples
+--------
+
+Keep the last completed batch:
+
+.. code-block:: xml
+
+    <state_point>
+      <batches>-1</batches>
+    </state_point>
+
+Keep the last two completed batches:
+
+.. code-block:: xml
+
+    <state_point>
+      <batches>-2</batches>
+    </state_point>
+
+You can also mix positive and negative values. Positive values specify
+explicit batches at which to write statepoints, while a negative value keeps a
+rolling window of recent batches:
+
+.. code-block:: xml
+
+    <state_point>
+      <batches>10 20 30 -3</batches>
+    </state_point>
+
+In this case, statepoints are written at batches 10, 20, and 30, and the last 3
+completed batches are also retained as running statepoints.
+
+File Naming
+-----------
+
+Running statepoint files are named ``statepoint.running.<batch>.h5``, where
+``<batch>`` is the batch number. For example:
+
+- Batch 1: ``statepoint.running.1.h5``
+- Batch 2: ``statepoint.running.2.h5``
+- Batch 5: ``statepoint.running.5.h5``
+
+Pruning
+-------
+
+When you specify ``-N``, OpenMC keeps only the most recent ``N`` running
+statepoints. Older running statepoint files are deleted automatically when a
+new batch completes and the number of running statepoints exceeds ``N``.
+
+For example, with ``-2``, as batches 1 through 5 complete:
+
+- After batch 1 completes: ``statepoint.running.1.h5`` exists
+- After batch 2 completes: ``statepoint.running.1.h5``, ``statepoint.running.2.h5`` exist
+- After batch 3 completes: ``statepoint.running.2.h5``, ``statepoint.running.3.h5`` exist (batch 1 file deleted)
+- After batch 4 completes: ``statepoint.running.3.h5``, ``statepoint.running.4.h5`` exist (batch 2 file deleted)
+- After batch 5 completes: ``statepoint.running.4.h5``, ``statepoint.running.5.h5`` exist (batch 3 file deleted)
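+
+Reading a Running Statepoint
+----------------------------
+
+Because running statepoints are ordinary statepoint files, they can be opened
+with the Python API like any other statepoint. The snippet below is a minimal
+sketch that assumes the default file naming described above:
+
+.. code-block:: python
+
+    import glob
+
+    import openmc
+
+    # Pick the newest running statepoint by parsing the batch number out of
+    # the statepoint.running.<batch>.h5 file name
+    files = glob.glob('statepoint.running.*.h5')
+    latest = max(files, key=lambda f: int(f.split('.')[2]))
+
+    with openmc.StatePoint(latest) as sp:
+        print(f'Results available through batch {sp.current_batch}')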
+
+Remarks
+-------
+
+Running statepoints are written in addition to any permanent statepoint files
+specified by positive batch numbers. Each running statepoint file is named
+with its batch number, making it easy to identify which batch's results are
+contained in each file.
diff --git a/docs/source/usersguide/settings.rst b/docs/source/usersguide/settings.rst
index f973f146552..a8b81135f21 100644
--- a/docs/source/usersguide/settings.rst
+++ b/docs/source/usersguide/settings.rst
@@ -651,6 +651,19 @@ As an example, to write a statepoint file every five batches::
     settings.batches = n
     settings.statepoint = {'batches': range(5, n + 5, 5)}
 
+Additionally, you can specify negative batch numbers to keep running statepoints
+for the most recent batches. For example, to keep the last 2 completed batches::
+
+    settings.statepoint = {'batches': -2}
+
+Or to write statepoints at specific batches and also keep the last 3 completed
+batches, you can mix positive and negative values::
+
+    settings.statepoint = {'batches': [10, 20, 30, -3]}
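+
+A running statepoint can also be used to restart an interrupted run in the
+same way as a regular statepoint. For example, assuming the newest file is
+``statepoint.running.5.h5`` (file name shown for illustration)::
+
+    openmc.run(restart_file='statepoint.running.5.h5')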
+
+See :ref:`usersguide_statepoint_latest` for detailed information on running
+statepoints, file naming, and automatic pruning behavior.
+
 
 Particle Track Files
 --------------------
diff --git a/openmc/settings.py b/openmc/settings.py
index 289fb35b731..a35f33fcd5b 100644
--- a/openmc/settings.py
+++ b/openmc/settings.py
@@ -262,6 +262,8 @@ class Settings:
         Options for writing state points. Acceptable keys are:
 
         :batches: list of batches at which to write statepoint files
+                  Positive integers write statepoints at the specified batches.
+                  A negative integer -N keeps running statepoints for the last N batches.
     surf_source_read : dict
         Options for reading surface source points. Acceptable keys are:
 
@@ -760,6 +762,33 @@ def output(self, output: dict):
                 cv.check_type("output['path']", value, str)
         self._output = output
 
+    @property
+    def statepoint(self) -> dict:
+        """Dictionary of statepoint options.
+
+        Acceptable keys:
+        - 'batches': list of batch integers or a single integer.
+          Positive integers: write statepoints at the specified batches.
+          Negative integer -N: keep running statepoints for the last N batches.
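+
+        For example, ``{'batches': [10, 20, 30, -3]}`` writes statepoints at
+        batches 10, 20, and 30 and also keeps running statepoints for the last
+        three completed batches.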
+        """
+        return self._statepoint
+
+    @statepoint.setter
+    def statepoint(self, statepoint: dict):
+        cv.check_type('statepoint', statepoint, Mapping)
+        sp = {}
+        if 'batches' in statepoint:
+            batches = statepoint['batches']
+            if isinstance(batches, Integral):
+                # Single integer: positive (single batch) or negative (keep last N)
+                sp['batches'] = [int(batches)]
+            elif isinstance(batches, Sequence):
+                # Sequence of integers
+                sp['batches'] = [int(x) for x in batches]
+            else:
+                raise ValueError("statepoint['batches'] must be a sequence or single integer")
+        self._statepoint = sp
+
     @property
     def sourcepoint(self) -> dict:
         return self._sourcepoint
@@ -785,23 +814,6 @@ def sourcepoint(self, sourcepoint: dict):
                                  "setting sourcepoint options.")
         self._sourcepoint = sourcepoint
 
-    @property
-    def statepoint(self) -> dict:
-        return self._statepoint
-
-    @statepoint.setter
-    def statepoint(self, statepoint: dict):
-        cv.check_type('statepoint options', statepoint, Mapping)
-        for key, value in statepoint.items():
-            if key == 'batches':
-                cv.check_type('statepoint batches', value, Iterable, Integral)
-                for batch in value:
-                    cv.check_greater_than('statepoint batch', batch, 0)
-            else:
-                raise ValueError(f"Unknown key '{key}' encountered when "
-                                 "setting statepoint options.")
-        self._statepoint = statepoint
-
     @property
     def surf_source_read(self) -> dict:
         return self._surf_source_read
@@ -1504,11 +1516,6 @@ def _create_sourcepoint_subelement(self, root):
             subelement = ET.SubElement(element, "write")
             subelement.text = str(self._sourcepoint['write']).lower()
 
-        # Overwrite latest subelement
-        if 'overwrite' in self._sourcepoint:
-            subelement = ET.SubElement(element, "overwrite_latest")
-            subelement.text = str(self._sourcepoint['overwrite']).lower()
-
         if 'mcpl' in self._sourcepoint:
             subelement = ET.SubElement(element, "mcpl")
             subelement.text = str(self._sourcepoint['mcpl']).lower()
@@ -2005,11 +2012,9 @@ def _statepoint_from_xml_element(self, root):
     def _sourcepoint_from_xml_element(self, root):
         elem = root.find('source_point')
         if elem is not None:
-            for key in ('separate', 'write', 'overwrite_latest', 'batches', 'mcpl'):
-                if key in ('separate', 'write', 'mcpl', 'overwrite_latest'):
+            for key in ('separate', 'write', 'batches', 'mcpl'):
+                if key in ('separate', 'write', 'mcpl'):
                     value = get_text(elem, key) in ('true', '1')
-                    if key == 'overwrite_latest':
-                        key = 'overwrite'
                 else:
                     value = get_elem_list(elem, key, int)
                 if value is not None:
diff --git a/src/settings.cpp b/src/settings.cpp
index 9dcf7c8dbc8..05efbbbcab4 100644
--- a/src/settings.cpp
+++ b/src/settings.cpp
@@ -4,6 +4,8 @@
 #include <cmath>  // for ceil, pow
 #include <limits> // for numeric_limits
 #include
+#include
+#include
 #include
 
 #ifdef _OPENMP
diff --git a/src/simulation.cpp b/src/simulation.cpp
index b536ae5881f..c3c5f8dbdfc 100644
--- a/src/simulation.cpp
+++ b/src/simulation.cpp
@@ -40,6 +40,8 @@
 #include
 #include
+#include <filesystem>
+#include <regex>
 #include
 
 //==============================================================================
@@ -443,6 +445,72 @@ void finalize_batch()
     }
   }
 
+  // Write running statepoints if the user specified a negative batch number
+  // in statepoint_batch. A negative value -N means "keep the last N completed
+  // batches as running statepoints".
+  int keep_last_n = 0;
+  for (int b : settings::statepoint_batch) {
+    if (b < 0) {
+      keep_last_n = -b; // Convert to a positive count
+      break;
+    }
+  }
+
+  if (keep_last_n > 0 && !settings::cmfd_run) {
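+    // Decide whether the source bank should be written into this running
+    // statepoint; this mirrors the check used for regular statepoint writes.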
+    bool b = false;
+    if (contains(settings::sourcepoint_batch, simulation::current_batch) &&
+        settings::source_write && !settings::source_separate) {
+      b = (settings::run_mode == RunMode::EIGENVALUE);
+    }
+    namespace fs = std::filesystem;
+
+    // Construct output directory (use current path if none specified)
+    fs::path outdir = settings::path_output.empty()
+                        ? fs::current_path()
+                        : fs::path(settings::path_output);
+
+    // Write running statepoint with batch number in filename.
+    fs::path sp_name =
+      fmt::format("statepoint.running.{}.h5", simulation::current_batch);
+    fs::path sp_path = outdir / sp_name;
+    std::string filename = sp_path.string();
+    openmc_statepoint_write(filename.c_str(), &b);
+
+    // Prune older running statepoint files so that at most `keep_last_n`
+    // remain. Match files like `statepoint.running.<batch>.h5`.
+    try {
+      std::vector<std::pair<int, fs::path>> files; // (batch number, path)
+      static const std::regex re(R"(^statepoint\.running\.(\d+)\.h5$)");
+      if (fs::exists(outdir) && fs::is_directory(outdir)) {
+        for (auto& p : fs::directory_iterator(outdir)) {
+          auto name = p.path().filename().string();
+          std::smatch m;
+          if (std::regex_match(name, m, re)) {
+            int batchnum = std::stoi(m[1].str());
+            files.emplace_back(batchnum, p.path());
+          }
+        }
+      }
+
+      // Sort by batch number descending (newest first)
+      std::sort(files.begin(), files.end(), [](auto& lhs, auto& rhs) {
+        if (lhs.first != rhs.first)
+          return lhs.first > rhs.first;
+        return lhs.second.string() > rhs.second.string();
+      });
+
+      // Remove files older than the most recent `keep_last_n`
+      for (size_t i = static_cast<size_t>(keep_last_n); i < files.size(); ++i) {
+        std::error_code ec;
+        fs::remove(files[i].second, ec);
+      }
+    } catch (...) {
+      // On any filesystem/regex error, skip pruning so the simulation can
+      // continue
+    }
+  }
+
   if (settings::run_mode == RunMode::EIGENVALUE) {
     // Write out a separate source point if it's been specified for this batch
     if (contains(settings::sourcepoint_batch, simulation::current_batch) &&
diff --git a/tests/regression_tests/statepoint_latest/__init__.py b/tests/regression_tests/statepoint_latest/__init__.py
new file mode 100644
index 00000000000..d2af06a9a8b
--- /dev/null
+++ b/tests/regression_tests/statepoint_latest/__init__.py
@@ -0,0 +1 @@
+# regression test package
diff --git a/tests/regression_tests/statepoint_latest/model.xml b/tests/regression_tests/statepoint_latest/model.xml
new file mode 100644
index 00000000000..cc1a595ef99
--- /dev/null
+++ b/tests/regression_tests/statepoint_latest/model.xml
@@ -0,0 +1,63 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    eigenvalue
+    1000
+    5
+    1
+
+
+    -0.63 -0.63 -1 0.63 0.63 1
+
+
+    true
+
+
+
+    -2
+
+
+
+
+    300 300
+    0.0 0.0 0.0
+    1.26 1.26
+
+
diff --git a/tests/regression_tests/statepoint_latest/test.py b/tests/regression_tests/statepoint_latest/test.py
new file mode 100644
index 00000000000..3c5bf754df8
--- /dev/null
+++ b/tests/regression_tests/statepoint_latest/test.py
@@ -0,0 +1,44 @@
+import glob
+import os
+import shutil
+import tempfile
+
+import openmc
+
+
+def test_statepoint_latest():
+    """Test that negative batch numbers keep running statepoints for the last N batches."""
+    # Create a temporary directory for this test
+    with tempfile.TemporaryDirectory() as tmpdir:
+        # Save the current directory and change to the temporary one
+        original_dir = os.getcwd()
+        os.chdir(tmpdir)
+
+        try:
+            # Copy all supporting files from the test directory into the temp
+            # working dir
+            src_dir = os.path.dirname(__file__)
+            for fname in os.listdir(src_dir):
+                if fname == '__pycache__':
+                    continue
+                src_path = os.path.join(src_dir, fname)
+                # Copy regular files only (XML, HDF5, etc.)
+                if os.path.isfile(src_path):
+                    shutil.copy(src_path, fname)
+
+            # Load the model from the single `model.xml` file
+            model = openmc.Model.from_model_xml('model.xml')
+
+            # Run the model
+            model.run(output=False)
+
+            # Check that exactly 2 running statepoint files exist
+            running = sorted(glob.glob('statepoint.running.*.h5'))
+            assert len(running) == 2, f'Expected 2 running statepoint files, found {len(running)}: {running}'
+
+            # Verify the batch numbers are 4 and 5 (last 2 of 5 batches)
+            batch_nums = sorted([int(f.split('.')[2]) for f in running])
+            assert batch_nums == [4, 5], f'Expected batches [4, 5], found {batch_nums}'
+
+        finally:
+            os.chdir(original_dir)
diff --git a/tests/regression_tests/statepoint_latest/test_pyapi.py b/tests/regression_tests/statepoint_latest/test_pyapi.py
new file mode 100644
index 00000000000..51c1e87607a
--- /dev/null
+++ b/tests/regression_tests/statepoint_latest/test_pyapi.py
@@ -0,0 +1,35 @@
+import glob
+import os
+
+import openmc
+import openmc.examples
+
+
+def test_statepoint_latest_pyapi():
+    """Test that negative batch numbers keep running statepoints for the last N batches."""
+    # Use the PWR pin cell example model, which is known to work
+    model = openmc.examples.pwr_pin_cell()
+
+    # Set batches: need at least 1 inactive + 1 active
+    model.settings.inactive = 1
+    model.settings.batches = 5
+    model.settings.particles = 1000
+
+    # Enable running statepoints: keep the last 2 batches
+    model.settings.statepoint = {'batches': -2}
+
+    # Run the model
+    model.run(output=False)
+
+    # Check that exactly 2 running statepoint files exist
+    running = sorted(glob.glob('statepoint.running.*.h5'))
+    assert len(running) == 2, f'Expected 2 running statepoint files, found {len(running)}: {running}'
+
+    # Verify the batch numbers are 4 and 5 (last 2 of 5 batches)
+    batch_nums = sorted([int(f.split('.')[2]) for f in running])
+    assert batch_nums == [4, 5], f'Expected batches [4, 5], found {batch_nums}'
+
+    # Clean up the running statepoint files
+    for f in running:
+        if os.path.exists(f):
+            os.remove(f)