-
Notifications
You must be signed in to change notification settings - Fork 8
Expand file tree
/
Copy pathinput_model_optimization.py
More file actions
169 lines (127 loc) · 5.88 KB
/
input_model_optimization.py
File metadata and controls
169 lines (127 loc) · 5.88 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
import json
from utils.config_utils import combine_configs, process_config
from algos.uniform_noise import UniformNoise
from algos.nopeek import NoPeek
from algos.simba_algo import SimbaAttack
from utils.metrics import MetricLoader
import torch
from models.skip import skip
import os
class InputModelOptimization(SimbaAttack):
    """Deep-image-prior style model-inversion attack.

    Given an intermediate representation ``z`` produced by an obfuscator
    (the "target" client model), this attack optimizes the *weights* of a
    randomly initialized skip-connection generator so that passing its
    output through the obfuscator reproduces ``z`` — thereby reconstructing
    the private input image.
    """

    def __init__(self, config, utils):
        """Set up the attack from ``config``; ``utils`` supplies device,
        logger and image-saving helpers (see SimbaAttack)."""
        super().__init__(utils)
        self.initialize(config)

    def create_gen_model(self):
        """Build the deep-image-prior skip network (3-channel in/out) and
        move it to the configured device.

        Returns:
            The generator module, on ``self.utils.device``.
        """
        gen_model = skip(
            3, 3,
            num_channels_down=[16, 32, 64, 128, 128, 128],
            num_channels_up=[16, 32, 64, 128, 128, 128],
            num_channels_skip=[4, 4, 4, 4, 4, 4],
            filter_size_down=[7, 7, 5, 5, 3, 3],  # type: ignore
            filter_size_up=[7, 7, 5, 5, 3, 3],  # type: ignore
            upsample_mode='nearest', downsample_mode='avg',
            need_sigmoid=True, pad='zero', act_fun='LeakyReLU')
        gen_model.to(self.utils.device)
        return gen_model

    def initialize(self, config):
        """Load the target (obfuscator) model, register metric tags and
        pick the optimizer/loss from ``config``.

        Raises:
            ValueError: if ``config["loss_fn"]`` is not one of
                ssim / l1 / l2 / lpips (previously a typo was silently
                ignored and crashed later with AttributeError).
        """
        self.attribute = config["attribute"]
        self.obf_model_name = config["target_model"]
        self.img_size = config["img_size"]

        # Load the target model's experiment + system configs.
        # (with-blocks fix the leaked file handles of json.load(open(...)))
        with open(config["target_model_config"]) as f:
            target_exp_config = json.load(f)
        with open("./configs/system_config.json") as f:
            system_config = json.load(f)
        target_exp_config["client"]["challenge"] = True
        target_config = process_config(
            combine_configs(system_config, target_exp_config))
        self.target_config = target_config

        # Imported here (as in the original) to avoid a circular import.
        from interface import load_algo
        self.obf_model = load_algo(target_config, self.utils)
        # gaussian_blur has no learned weights to restore
        if config["target_model"] != "gaussian_blur":
            wts = torch.load(config["target_model_path"])
            if isinstance(self.obf_model.client_model, torch.nn.DataParallel):  # type: ignore
                self.obf_model.client_model.module.load_state_dict(wts)
            else:
                self.obf_model.client_model.load_state_dict(wts)
        self.obf_model.enable_logs(False)
        self.obf_model.set_detached(False)
        self.model = self.obf_model  # to prevent errors thrown

        self.metric = MetricLoader(data_range=1)
        self.loss_tag = "recons_loss"
        self.ssim_tag = "ssim"
        self.l1_tag = "l1"
        self.l2_tag = "l2"
        self.psnr_tag = "psnr"
        for tag in (self.ssim_tag, self.l1_tag, self.l2_tag, self.psnr_tag):
            self.utils.logger.register_tag("train/" + tag)

        self.iters = config["iters"]
        self.lr = config["lr"]
        self.optim = (torch.optim.Adam if config["optimizer"] == "adam"
                      else torch.optim.SGD)  # type: ignore

        # self.sign flips the gradient direction: -1 maximizes the metric
        # (ssim), +1 minimizes it (distances).
        loss_table = {
            "ssim": (self.metric.ssim, -1),
            "l1": (self.metric.l1, 1),
            "l2": (self.metric.l2, 1),
            "lpips": (self.metric.lpips, 1),
        }
        try:
            self.loss_fn, self.sign = loss_table[config["loss_fn"]]
        except KeyError:
            raise ValueError(
                f"unknown loss_fn {config['loss_fn']!r}; "
                f"expected one of {sorted(loss_table)}") from None
        self.save_images = True

    @staticmethod
    def _noise_scale(i):
        """Annealing schedule for the input-space noise amplitude:
        strong early exploration, decaying to none after 20k iters."""
        if i < 10000:
            return 10.0
        if i < 15000:
            return 2.0
        if i < 20000:
            return 0.5
        return 0.0

    @staticmethod
    def _perturb_conv_weights(gen_model):
        """Add small Gaussian noise (std/50) to the 4-D conv weights,
        in-place.

        NOTE(review): the original loop rebound its loop variable
        (``n = n + ...``), which never modified the model — a no-op —
        and filtered parameters with ``len(x) == 4`` (size of dim 0)
        rather than tensor rank. Fixed to in-place perturbation of 4-D
        (conv) weights, matching the deep-image-prior reference code;
        confirm this matches the intended regularization.
        """
        with torch.no_grad():
            for w in gen_model.parameters():
                if w.dim() == 4:
                    w.add_(torch.randn_like(w) * w.std() / 50)

    def forward(self, items):
        """Run the inversion: fit a fresh generator so that
        obf_model(gen(noise)) matches ``z``; logs ssim against the true
        image and keeps the best reconstruction. Only runs in val mode.

        Args:
            items: dict with "z" (target representation) and "img"
                (ground-truth image, used only for evaluation/logging).
        """
        if self.mode != "val":
            return
        z = items["z"]
        img = items["img"]
        gen_model = self.create_gen_model()
        # Fixed random code for the generator; noise is re-drawn into a
        # clone each iteration so the base input stays unchanged.
        rand_inp_og = torch.rand((z.shape[0], 3, 128, 128)).detach().to(self.utils.device)
        inp_noise = rand_inp_og.detach().clone()
        optim = self.optim(gen_model.parameters(), lr=self.lr)
        prefix = "train/"
        self.utils.logger.set_log_freq(self.iters)

        # Track the reconstruction with the highest ssim seen so far.
        best_ssim = 0
        # NOTE(review): initial candidate is cropped to 128x128 while later
        # candidates are resized to img_size — possible inconsistency when
        # img_size != 128; confirm intended.
        best_ys = gen_model(rand_inp_og)[:, :, :128, :128]
        for i in range(self.iters):
            scale = self._noise_scale(i)
            if scale:
                # Perturb both the input code and the conv weights to
                # regularize the optimization (deep-image-prior style).
                rand_inp = rand_inp_og + (inp_noise.normal_() * scale)
                self._perturb_conv_weights(gen_model)
            else:
                rand_inp = rand_inp_og
            rand_inp = rand_inp.to(self.utils.device)
            optim.zero_grad()
            ys = gen_model(rand_inp)
            # Resize width and height to the target model's input size.
            ys = torch.nn.functional.interpolate(
                ys, size=(self.img_size, self.img_size),
                mode='bilinear', align_corners=True)
            out = self.obf_model({"x": ys})
            loss = self.loss_fn(out, z)
            ssim = self.metric.ssim(img, ys)
            self.utils.logger.add_entry(prefix + self.ssim_tag, ssim.item())
            if ssim > best_ssim:
                best_ssim = ssim
                best_ys = torch.clone(ys)
            if self.save_images and self.utils.logger.curr_iters % self.utils.logger.trigger_freq == 0:
                self.utils.save_image(
                    ys, f"{self.utils.logger.epoch}_ys_{self.utils.logger.curr_iters}.png")
            # sign = -1 turns a similarity metric into a loss to minimize
            (self.sign * loss).backward()
            optim.step()
        if self.save_images:
            self.utils.save_image(img, f"{self.utils.logger.epoch}_img.png")
            self.utils.save_image(best_ys, f"{self.utils.logger.epoch}_ys.png")
        self.utils.logger.flush_epoch()

    def backward(self, _):
        """No-op: all optimization happens inside forward()."""
        pass