From ad40cf640166988e8d7fdc0c60c09da4a426f9e3 Mon Sep 17 00:00:00 2001 From: Gabriel Apaza Date: Wed, 18 Mar 2026 10:23:07 -0400 Subject: [PATCH 1/4] feat: add gradient descent information to thermoelastic2d problem, modifies OptiStep API --- engibench/core.py | 5 ++ .../thermoelastic2d/model/fea_model.py | 77 +++++++++++++++++-- 2 files changed, 76 insertions(+), 6 deletions(-) diff --git a/engibench/core.py b/engibench/core.py index 1b0ff822..69d65357 100644 --- a/engibench/core.py +++ b/engibench/core.py @@ -24,6 +24,11 @@ class OptiStep: obj_values: npt.NDArray step: int + # Additional Gradient Fields + x: npt.NDArray | None = None # the current design before the gradient update + x_update: npt.NDArray | None = None # the gradient update step taken by the optimizer + obj_values_update: npt.NDArray | None = None # how the objective values change after the update step + class ObjectiveDirection(Enum): """Direction of the objective function.""" diff --git a/engibench/problems/thermoelastic2d/model/fea_model.py b/engibench/problems/thermoelastic2d/model/fea_model.py index af4eb673..3950a4cc 100644 --- a/engibench/problems/thermoelastic2d/model/fea_model.py +++ b/engibench/problems/thermoelastic2d/model/fea_model.py @@ -108,6 +108,54 @@ def get_filter(self, nelx: int, nely: int, rmin: float) -> tuple[coo_matrix, np. return h, hs + def has_converged(self, change: float, iterr: int) -> bool: + """Determines whether the optimization process has converged based on the change in design variables and iteration count. + + Args: + change (float): The maximum change in design variables from the previous iteration. + iterr (int): The current iteration number. + + Returns: + bool: True if the optimization has converged, False otherwise. Convergence is defined as either: + - The change in design variables is below a predefined threshold and a minimum number of iterations have been completed. + - The maximum number of iterations has been reached. 
+ """ + if iterr >= self.max_iter: + return True + return change < UPDATE_THRESHOLD and iterr >= MIN_ITERATIONS + + def record_step( + self, + opti_steps: list, + obj_values: np.ndarray, + iterr: int, + x_curr: np.ndarray, + x_update: np.ndarray, + *, + extra_iter: bool, + ): # noqa: PLR0913 + """Helper to handle OptiStep creation and updates. + + Args: + opti_steps (list): The list of OptiStep instances to update. + obj_values (np.ndarray): The current objective values to record. + iterr (int): The current iteration number. + x_curr (np.ndarray): The current design variable field before the update. + x_update (np.ndarray): The change in design variables from the last update. + extra_iter (bool): Flag indicating if this is the extra iteration after convergence for gradient information gathering. + + Returns: + None. This function updates the opti_steps list in place. + """ + if extra_iter is False: + step = OptiStep(obj_values=obj_values, step=iterr, x=x_curr, x_update=x_update) + opti_steps.append(step) + + if len(opti_steps) > 1: + # Targeting the most recent step to update its 'delta' + target_idx = -2 if not extra_iter else -1 + opti_steps[target_idx].obj_values_update = obj_values.copy() - opti_steps[target_idx].obj_values + def run(self, bcs: dict[str, Any], x_init: np.ndarray | None = None) -> dict[str, Any]: # noqa: PLR0915 """Run the optimization algorithm for the coupled structural-thermal problem. @@ -157,7 +205,7 @@ def run(self, bcs: dict[str, Any], x_init: np.ndarray | None = None) -> dict[str # 2. 
Parameters penal = 3 # Penalty term - rmin = 1.1 # Filter's radius + rmin = 1.5 # Filter's radius e = 1.0 # Modulus of elasticity nu = 0.3 # Poisson's ratio k = 1.0 # Conductivity @@ -177,6 +225,9 @@ def run(self, bcs: dict[str, Any], x_init: np.ndarray | None = None) -> dict[str c = 10000 * np.ones((m, 1)) d = np.zeros((m, 1)) + # Convergence / Iteration Criteria + extra_iter = False # This flag denotes if we are on the final extra iteration (for purpose of gathering gradient information) + # 3. Matrices ke, k_eth, c_ethm = self.get_matricies(nu, e, k, alpha) @@ -190,7 +241,7 @@ def run(self, bcs: dict[str, Any], x_init: np.ndarray | None = None) -> dict[str f0valm = 0.0 f0valt = 0.0 - while change > UPDATE_THRESHOLD or iterr < MIN_ITERATIONS: + while not self.has_converged(change, iterr) or extra_iter is True: iterr += 1 s_time = time.time() curr_time = time.time() @@ -287,10 +338,11 @@ def run(self, bcs: dict[str, Any], x_init: np.ndarray | None = None) -> dict[str "thermal_compliance": f0valt, "volume_fraction": vf_error, } + + # OptiStep Information vf_error = np.abs(np.mean(x) - volfrac) obj_values = np.array([f0valm, f0valt, vf_error]) - opti_step = OptiStep(obj_values=obj_values, step=iterr) - opti_steps.append(opti_step) + x_curr = x.copy() # Design variables before the gradient update (nely, nelx) df0dx = df0dx_mat.reshape(nely * nelx, 1) df0dx = (h @ (xval * df0dx)) / hs[:, None] / np.maximum(1e-3, xval) # Filtered sensitivity @@ -333,6 +385,12 @@ def run(self, bcs: dict[str, Any], x_init: np.ndarray | None = None) -> dict[str x = xmma.reshape(nely, nelx) + # Extract the exact gradient update step for OptiStep + x_update = x.copy() - x_curr + + # Record the OptiStep + self.record_step(opti_steps, obj_values, iterr, x_curr, x_update, extra_iter=extra_iter) + # Print results change = np.max(np.abs(xmma - xold1)) change_evol.append(change) @@ -342,8 +400,15 @@ def run(self, bcs: dict[str, Any], x_init: np.ndarray | None = None) -> dict[str f" It.: 
{iterr:4d} Obj.: {f0val:10.4f} Vol.: {np.sum(x) / (nelx * nely):6.3f} ch.: {change:6.3f} || t_forward:{t_forward:6.3f} + t_sensitivity:{t_sensitivity:6.3f} + t_sens_calc:{t_sensitivity_calc:6.3f} + t_mma: {t_mma:6.3f} = {t_total:6.3f}" ) - if iterr > self.max_iter: - break + # If extra_iter is True, we just did our last iteration and want to break + if extra_iter is True: + x = xold1.reshape(nely, nelx) # Revert to design before the last update (for accurate gradient information) + break # We technically don't have to break here, as the logic is built into the loop condition + + # We know we are not on the extra iteration + # Check to see if we have converged. If so, flag our extra iteration + if self.has_converged(change, iterr): + extra_iter = True print("Optimization finished...") vf_error = np.abs(np.mean(x) - volfrac) From 360bf47bb960172f7a2b17583b848960ad37d3ad Mon Sep 17 00:00:00 2001 From: Gabriel Apaza Date: Tue, 31 Mar 2026 11:10:58 -0400 Subject: [PATCH 2/4] feat: add sensitivity information in OptiStep API --- engibench/core.py | 1 + .../thermoelastic2d/model/fea_model.py | 39 ++++++++++--------- .../thermoelastic2d/model/opti_dataclass.py | 23 +++++++++++ 3 files changed, 45 insertions(+), 18 deletions(-) create mode 100644 engibench/problems/thermoelastic2d/model/opti_dataclass.py diff --git a/engibench/core.py b/engibench/core.py index 69d65357..8a1f98d6 100644 --- a/engibench/core.py +++ b/engibench/core.py @@ -26,6 +26,7 @@ class OptiStep: # Additional Gradient Fields x: npt.NDArray | None = None # the current design before the gradient update + x_sensitivities: npt.NDArray | None = None # the sensitivities of the design variables x_update: npt.NDArray | None = None # the gradient update step taken by the optimizer obj_values_update: npt.NDArray | None = None # how the objective values change after the update step diff --git a/engibench/problems/thermoelastic2d/model/fea_model.py b/engibench/problems/thermoelastic2d/model/fea_model.py index 
3950a4cc..d5a87236 100644 --- a/engibench/problems/thermoelastic2d/model/fea_model.py +++ b/engibench/problems/thermoelastic2d/model/fea_model.py @@ -14,6 +14,7 @@ from engibench.problems.thermoelastic2d.model.fem_setup import fe_mthm_bc from engibench.problems.thermoelastic2d.model.mma_subroutine import MMAInputs from engibench.problems.thermoelastic2d.model.mma_subroutine import mmasub +from engibench.problems.thermoelastic2d.model.opti_dataclass import OptiStepUpdate from engibench.problems.thermoelastic2d.utils import get_res_bounds from engibench.problems.thermoelastic2d.utils import plot_multi_physics @@ -124,31 +125,24 @@ def has_converged(self, change: float, iterr: int) -> bool: return True return change < UPDATE_THRESHOLD and iterr >= MIN_ITERATIONS - def record_step( - self, - opti_steps: list, - obj_values: np.ndarray, - iterr: int, - x_curr: np.ndarray, - x_update: np.ndarray, - *, - extra_iter: bool, - ): # noqa: PLR0913 + def record_step(self, opti_steps: list, opti_step_update: OptiStepUpdate): """Helper to handle OptiStep creation and updates. Args: - opti_steps (list): The list of OptiStep instances to update. - obj_values (np.ndarray): The current objective values to record. - iterr (int): The current iteration number. - x_curr (np.ndarray): The current design variable field before the update. - x_update (np.ndarray): The change in design variables from the last update. - extra_iter (bool): Flag indicating if this is the extra iteration after convergence for gradient information gathering. + opti_steps (list): The list of OptiStep objects to be updated in place with the new step information. + opti_step_update (OptiStepUpdate): A dataclass encapsulating all input parameters. Returns: None. This function updates the opti_steps list in place. 
""" + obj_values = opti_step_update.obj_values + iterr = opti_step_update.iterr + x_curr = opti_step_update.x_curr + x_sensitivities = opti_step_update.x_sensitivities + x_update = opti_step_update.x_update + extra_iter = opti_step_update.extra_iter if extra_iter is False: - step = OptiStep(obj_values=obj_values, step=iterr, x=x_curr, x_update=x_update) + step = OptiStep(obj_values=obj_values, step=iterr, x=x_curr, x_sensitivities=x_sensitivities, x_update=x_update) opti_steps.append(step) if len(opti_steps) > 1: @@ -389,7 +383,16 @@ def run(self, bcs: dict[str, Any], x_init: np.ndarray | None = None) -> dict[str x_update = x.copy() - x_curr # Record the OptiStep - self.record_step(opti_steps, obj_values, iterr, x_curr, x_update, extra_iter=extra_iter) + df0dx_all = np.stack([df0dx_m, df0dx_t, df0dx_mat, dfdx.reshape(nely, nelx)], axis=0) + opti_step_update = OptiStepUpdate( + obj_values=obj_values, + iterr=iterr, + x_curr=x_curr, + x_sensitivities=df0dx_all, + x_update=x_update, + extra_iter=extra_iter, + ) + self.record_step(opti_steps, opti_step_update) # Print results change = np.max(np.abs(xmma - xold1)) diff --git a/engibench/problems/thermoelastic2d/model/opti_dataclass.py b/engibench/problems/thermoelastic2d/model/opti_dataclass.py new file mode 100644 index 00000000..ac2e2ca5 --- /dev/null +++ b/engibench/problems/thermoelastic2d/model/opti_dataclass.py @@ -0,0 +1,23 @@ +"""This module contains the dataclass for updating an opti-step in the thermoelastic2d problem.""" + +from dataclasses import dataclass + +import numpy as np + + +@dataclass(frozen=True) +class OptiStepUpdate: + """Dataclass encapsulating all input parameters for an OptiStep update.""" + + obj_values: np.ndarray + """The objectives values of the current iteration""" + iterr: int + """The current iteration number""" + x_curr: np.ndarray + """The current design variables""" + x_sensitivities: np.ndarray + """The sensitivities of the design variables""" + x_update: np.ndarray + """The 
gradient update step taken by the optimizer""" + extra_iter: bool + """Whether this update is for the final iteration or not""" From 927b0e64da2f18f158de1c0b2873d20b41f97dea Mon Sep 17 00:00:00 2001 From: Gabriel Apaza Date: Tue, 31 Mar 2026 11:26:28 -0400 Subject: [PATCH 3/4] feat: add comments to OptiStep --- engibench/core.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/engibench/core.py b/engibench/core.py index 8a1f98d6..b2365960 100644 --- a/engibench/core.py +++ b/engibench/core.py @@ -25,10 +25,14 @@ class OptiStep: step: int # Additional Gradient Fields - x: npt.NDArray | None = None # the current design before the gradient update - x_sensitivities: npt.NDArray | None = None # the sensitivities of the design variables - x_update: npt.NDArray | None = None # the gradient update step taken by the optimizer - obj_values_update: npt.NDArray | None = None # how the objective values change after the update step + x: npt.NDArray | None = None + """the current design before the gradient update""" + x_sensitivities: npt.NDArray | None = None + """the sensitivities of the design variables""" + x_update: npt.NDArray | None = None + """the gradient update step taken by the optimizer""" + obj_values_update: npt.NDArray | None = None + """how the objective values change after the update step""" class ObjectiveDirection(Enum): """Direction of the objective function.""" From 87003e740b1fa65e10cc3fef103179a852dd09f5 Mon Sep 17 00:00:00 2001 From: Gabriel Apaza Date: Tue, 31 Mar 2026 11:50:29 -0400 Subject: [PATCH 4/4] feat: fix parameter typing in thermoelastic2d --- engibench/problems/thermoelastic2d/model/fea_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engibench/problems/thermoelastic2d/model/fea_model.py b/engibench/problems/thermoelastic2d/model/fea_model.py index d5a87236..e2c271a6 100644 --- a/engibench/problems/thermoelastic2d/model/fea_model.py +++ b/engibench/problems/thermoelastic2d/model/fea_model.py @@ -125,7 +125,7 @@ def
has_converged(self, change: float, iterr: int) -> bool: return True return change < UPDATE_THRESHOLD and iterr >= MIN_ITERATIONS - def record_step(self, opti_steps: list, opti_step_update: OptiStepUpdate): + def record_step(self, opti_steps: list[OptiStep], opti_step_update: OptiStepUpdate): """Helper to handle OptiStep creation and updates. Args: