auglab/configs/transform_params_gpu.json (53 additions, 61 deletions)
@@ -3,9 +3,9 @@
"kernel_type": "Scharr",
"absolute": true,
"retain_stats": true,
"mix_prob": 0.90,
"in_seg": 0.5,
"out_seg": 0.5,
"mix_prob": 0.50,
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"probability": 0.15
},
@@ -14,18 +14,18 @@
"sigma": 1.0,
"retain_stats": false,
"mix_prob": 0.50,
"in_seg": 0.5,
"out_seg": 0.5,
"mix_in_out": false,
"probability": 0.20
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"probability": 0.15
},
"UnsharpMaskTransform": {
"kernel_type": "UnsharpMask",
"sigma": 1.0,
"unsharp_amount": 1.5,
"retain_stats": false,
"in_seg": 0.5,
"out_seg": 0.5,
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"mix_prob": 0.50,
"probability": 0.10
@@ -34,83 +34,85 @@
"kernel_type": "RandConv",
"kernel_sizes": [3,5,7],
"retain_stats": true,
"in_seg": 0.5,
"out_seg": 0.5,
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"mix_prob": 0.50,
"probability": 0.10
},
"RedistributeSegTransform": {
"in_seg": 0.2,
"in_seg": 0.25,
"retain_stats": true,
"probability": 0.5
"probability": 0.1
},
"GaussianNoiseTransform": {
"mean": 0.0,
"std": 0.1,
"std": 1.0,
"in_seg": 0.5,
"out_seg": 0.5,
"mix_in_out": true,
"probability": 0.10
"probability": 0.15
},
"ClampTransform": {
"max_clamp_amount": 0.2,
"retain_stats": false,
"in_seg": 0.5,
"out_seg": 0.5,
"retain_stats": true,
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"probability": 0.40
"probability": 0.05
},
"BrightnessTransform": {
"brightness_range": [0.75, 1.25],
"in_seg": 0.0,
"out_seg": 0.0,
"mix_in_out": false,
"probability": 0.15
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"probability": 0.5
},
"GammaTransform": {
"gamma_range": [0.7, 1.5],
"retain_stats": true,
"in_seg": 0.5,
"out_seg": 0.5,
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"probability": 0.30
"probability": 0.50
},
"InvGammaTransform": {
"gamma_range": [0.7, 1.5],
"retain_stats": true,
"in_seg": 0.5,
"out_seg": 0.5,
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"probability": 0.10
"probability": 0.30
},
"ContrastTransform": {
"contrast_range": [0.75, 1.25],
"retain_stats": false,
"in_seg": 0.0,
"out_seg": 0.0,
"mix_in_out": false,
"probability": 0.15
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"probability": 0.5
},
"FunctionTransform": {
"retain_stats": true,
"in_seg": 0.5,
"out_seg": 0.5,
"mix_in_out": true,
"probability": 0.05
"in_seg": 0,
"out_seg": 0,
Comment on lines +97 to +98 (Copilot AI, Jan 14, 2026):

The in_seg and out_seg values are set to 0 (integer) instead of 0.0 (float) for FunctionTransform, while all other transforms use float notation (0.0, 0.1, etc.). For consistency and to avoid potential type issues, use 0.0 for float values throughout the configuration.
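As a quick plain-Python check of the point above: the json module does preserve the int/float distinction, though Python's numeric promotion means downstream arithmetic is usually unaffected. The snippet below is a standalone illustration, not auglab code.

import json

cfg = json.loads('{"in_seg": 0, "out_seg": 0.0}')
print(type(cfg["in_seg"]))   # <class 'int'>
print(type(cfg["out_seg"]))  # <class 'float'>

# Numeric promotion makes mixed int/float arithmetic behave as expected,
# so the concern is mainly consistency (and any strict isinstance checks):
print(cfg["in_seg"] == cfg["out_seg"])  # True
print(0.5 * cfg["in_seg"])              # 0.0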
"mix_in_out": false,
"probability": 0.025
},
"InverseTransform": {
"retain_stats": true,
"in_seg": 0.5,
"out_seg": 0.5,
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"probability": 0.05
"mix_prob": 0.50,
"probability": 0.1
},
"HistogramEqualizationTransform": {
"retain_stats": false,
"in_seg": 0.5,
"out_seg": 0.5,
"retain_stats": true,
"in_seg": 0,
"out_seg": 0,
"mix_in_out": true,
"mix_prob": 0.80,
"probability": 0.10
},
"SimulateLowResTransform": {
@@ -123,14 +125,14 @@
"scale": [0.6, 1.0],
"crop": [1.0, 1.0],
"same_on_batch": false,
"probability": 0.40
"probability": 0.05
},
"BiasFieldTransform": {
"retain_stats": false,
"coefficients": 0.5,
"in_seg": 0.5,
"out_seg": 0.5,
"mix_in_out": false,
"retain_stats": true,
"coefficients": 0.2,
"in_seg": 0.1,
"out_seg": 0.1,
"mix_in_out": true,
"probability": 0.10
},
"FlipTransform": {
@@ -145,19 +147,9 @@
"scale": [0.7, 1.4],
"shear": [-5, 5, -5, 5, -5, 5],
"resample": "bilinear",
"probability": 0
},
"nnUNetSpatialTransform": {
"patch_center_dist_from_border": 80,
"random_crop": true,
"p_elastic_deform": 0.2,
"p_rotation": 0.5,
"p_scaling": 0.5,
"scaling": [0.7, 1.4],
"p_synchronize_scaling_across_axes": 1,
"bg_style_seg_sampling": false
"probability": 0.5
},
"ZscoreNormalizationTransform": {
"probability": 0.3
"probability": 0.0
}
}
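Stepping back from the individual value changes: each block in this file configures one GPU-side transform, and the per-transform probability is the chance that the transform is applied at all. Below is a minimal sketch of how such a file could be consumed; load_transform_params and the gating loop are illustrative assumptions, not auglab's actual loader.

import json
import random

def load_transform_params(path: str) -> dict:
    # Hypothetical helper: read the per-transform parameter blocks.
    with open(path) as f:
        return json.load(f)

params = load_transform_params("auglab/configs/transform_params_gpu.json")

# Illustrative gating: each transform fires independently with its
# configured probability; the remaining keys (in_seg, out_seg,
# mix_in_out, ...) would be forwarded to the transform itself.
for name, cfg in params.items():
    if random.random() < cfg.get("probability", 0.0):
        print(f"would apply {name}")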
auglab/trainers/nnUNetTrainerDAExt.py (9 additions, 10 deletions)
@@ -210,7 +210,7 @@ def get_training_transforms(
else:
patch_size_spatial = patch_size
ignore_axes = None

if 'nnUNetSpatialTransform' in config:
spatial_params = config['nnUNetSpatialTransform']
else:
@@ -234,7 +234,7 @@

if do_dummy_2d_data_aug:
transforms.append(Convert2DTo3DTransform())

if use_mask_for_norm is not None and any(use_mask_for_norm):
transforms.append(MaskImageTransform(
apply_to_channels=[i for i in range(len(use_mask_for_norm)) if use_mask_for_norm[i]],
@@ -284,8 +284,8 @@ def get_training_transforms(
channel_in_seg=0
)
)
-transforms.append(ZscoreNormalization())

+# transforms.append(ZscoreNormalization())

# NOTE: DownsampleSegForDSTransform is now handled in train_step for GPU augmentations
# if deep_supervision_scales is not None:
@@ -323,13 +323,13 @@ def get_validation_transforms(
channel_in_seg=0
)
)
-transforms.append(ZscoreNormalization())

+# transforms.append(ZscoreNormalization())

if deep_supervision_scales is not None:
transforms.append(DownsampleSegForDSTransform(ds_scales=deep_supervision_scales))
return ComposeTransforms(transforms)

def train_step(self, batch: dict) -> dict:
data = batch['data']
target = batch['target']
@@ -350,13 +350,13 @@ def train_step(self, batch: dict) -> dict:
with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():
# Apply GPU augmentations to full-resolution data/target
data, target = self.transforms(data, target)

# Create multi-scale targets for deep supervision after augmentation
deep_supervision_scales = self._get_deep_supervision_scales()
if deep_supervision_scales is not None:
ds_transform = DownsampleSegForDSTransformCustom(ds_scales=deep_supervision_scales)
target = ds_transform(target)

output = self.network(data)
# del data
l = self.loss(output, target)
@@ -519,4 +519,3 @@ def train_step(self, batch: dict) -> dict:
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
self.optimizer.step()
return {'loss': l.detach().cpu().numpy()}

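One detail of the train_step hunk above that may be worth spelling out: because DownsampleSegForDSTransformCustom now runs after the GPU augmentations, the deep-supervision targets are derived from the already-augmented full-resolution segmentation. Here is a rough standalone sketch of what such a downsampling step presumably does; the nearest-neighbor per-scale resizing is an assumption on my part, and the real auglab implementation may differ.

import torch
import torch.nn.functional as F

def downsample_seg_for_ds(target, ds_scales):
    # Sketch: build one label map per deep-supervision scale.
    # Nearest-neighbor interpolation keeps segmentation labels discrete.
    outputs = []
    for scale in ds_scales:
        if all(s == 1 for s in scale):
            outputs.append(target)
        else:
            size = [round(d * s) for d, s in zip(target.shape[2:], scale)]
            resized = F.interpolate(target.float(), size=size, mode="nearest")
            outputs.append(resized.to(target.dtype))
    return outputs

# Example: a 3D patch at full resolution plus two halvings.
seg = torch.randint(0, 3, (2, 1, 32, 64, 64))
ds_targets = downsample_seg_for_ds(seg, [(1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25)])
print([t.shape for t in ds_targets])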