Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
21eac17
feat: moved config into separate files
bobbomania Dec 23, 2025
3592271
feat: added entropy metric
bobbomania Jan 4, 2026
468c8ab
feat: added rest of metric evaluation code
bobbomania Jan 4, 2026
3d247ff
feat: increased iterations for more stable bo
bobbomania Jan 4, 2026
bf7e138
feat: changed the summary committing and output best values found
bobbomania Jan 5, 2026
fa2f64d
feat: updated bayes opt to record metrics for summary
bobbomania Jan 6, 2026
d27f277
feat: fixed initial point generation for bayes opt
bobbomania Jan 7, 2026
0d96d72
feat: added cpu flag in experiment setup
bobbomania Jan 7, 2026
63b23d4
feat: added resnet config file
gabtro03 Jan 12, 2026
914ff14
feat: added strategies for different optimisation goals to HEART'25
bobbomania Jan 13, 2026
96f4f5f
merge: updated with old uncommitted changes from CC machine
bobbomania Jan 13, 2026
167bc21
feat: added use of current strategy for bayesian optimisation
bobbomania Jan 13, 2026
57d248e
feat: extracted strategy logic into separate task
bobbomania Jan 14, 2026
1cb9376
merge: updated with changes from KAN-24 to incorporate strategies
bobbomania Jan 14, 2026
795777d
feat: added results table for various strategies including top results
bobbomania Jan 14, 2026
bd3106e
feat: added pareto front plot and correlation matrix plot
bobbomania Jan 14, 2026
1275200
feat: added resnet model from trustworthy_dl
bobbomania Jan 14, 2026
4a84cd0
feat: added TF environment variables to ensure determinism
bobbomania Jan 14, 2026
283d631
fix: removed disabling of amp and shuffled initialise experiment
bobbomania Jan 14, 2026
11325eb
fix: fixed determinism for ece calculation
bobbomania Jan 14, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
logs/
results/
prj/
__pycache__/
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ An orchestration framework for co-optimizing neural network models on hardware p

Once it is created:
```bash
conda activate nauticml
conda activate nauticML
```

3. **Start Prefect and PostgreSQL services**
Expand Down
103 changes: 0 additions & 103 deletions config.yaml

This file was deleted.

145 changes: 145 additions & 0 deletions config/config_lenet.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,145 @@
---

experiment:
seed: 42
save_dir: ./prj/exp_mnist_bayes_lenet_try
save_model: mnist_bayes_lenet_10samples_mc
gpus: [0]
ckpt_file: best_chkp.tf

dataset:
name: mnist
data: <dict>
mean: 0.0
std: 1.0

model:
original: <obj>
logic: <obj>
name: lenet
is_quant: false
dropout_rate: 0.4
p_rate: 0.05
scale_factor: 0.3
dropout_type: mc
num_bayes_layer: 3

# NOTE: introduce typing so that we can type dictionaries too
S1:
accuracy:
weight: 0.25
base: 0.99
ece:
weight: -0.25
base: 0.03
ape:
weight: 0.25
base: 1.5
flops:
weight: -0.25
base: 5340192

top_n: 3
# special metric calculated from the other metrics
top_metric: score

name: Balance

S2:
accuracy:
weight: 0.75
base: 0.99
ece:
weight: 0.00
base: 0.03
ape:
weight: 0.00
base: 1.5
flops:
weight: -0.25
base: 5340192

top_n: 1
top_metric: flops

name: Opt-Efficiency

strategy:
strategies: [ S1 ]
curr_strategy: <int>
curr_results: ((bayes_opt.summary))

terminate_strategies: <bool>
results: <dict>
save_dir: ((experiment.save_dir))


bayes_opt:
iteration: <int>
terminate: <bool>
engine: <obj>
score: <float>
summary: <list>

curr_strategy: <obj>

control:
params: <dict>
suggests: <dict>

num_iter: 2
seed: ((experiment.seed))
tunable:
dropout_rate:
value: ((model.dropout_rate))
space: [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
# space: [0.1, 0.2, 0.3, 0.4]
p_rate:
value: ((model.p_rate))
space: [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
# space: [0.0, 0.1, 0.2]
num_bayes_layer:
value: ((model.num_bayes_layer))
space: [1, 2, 3]
scale_factor:
value: ((model.scale_factor))
# space: [0.5, 1.0, 1.5]
space: [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]

metrics:
accuracy: ((eval.accuracy))
ece: ((eval.ece))
ape: ((eval.ape))
flops: ((eval.flops))

train:
optimizer: <obj>
id: ((bayes_opt.iteration))
num_epoch: 3
batch_size: 128
learning_rate: 0.01
validation_split: 0.1

eval:
ece: <float>
ape: <float>
accuracy: <float>
flops: <float>

mc_samples: 5
num_eval_images: 200
num_bins: 10


reporter:
log:
- bayes_opt.iteration
- dropout_rate_list
- p_rate
- num_bayes_layer
- scale_factor
- accuracy
- flops
- ece
- ape
- score
Loading