Skip to content

Commit eb77ce2

Browse files
Used black on other unused files. (#1032)
1 parent 1c07333 commit eb77ce2

File tree

7 files changed

+422
-317
lines changed

7 files changed

+422
-317
lines changed

pyproject.toml

Lines changed: 0 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -27,19 +27,13 @@ exclude = '''
2727
| tensorflow_addons/losses/giou_loss.py
2828
| tensorflow_addons/losses/giou_loss_test.py
2929
| tensorflow_addons/metrics/cohens_kappa.py
30-
| tensorflow_addons/metrics/cohens_kappa_test.py
3130
| tensorflow_addons/metrics/matthews_correlation_coefficient.py
3231
| tensorflow_addons/metrics/multilabel_confusion_matrix.py
3332
| tensorflow_addons/metrics/r_square.py
3433
| tensorflow_addons/optimizers/__init__.py
3534
| tensorflow_addons/optimizers/lookahead.py
36-
| tensorflow_addons/optimizers/lookahead_test.py
3735
| tensorflow_addons/optimizers/moving_average.py
38-
| tensorflow_addons/optimizers/moving_average_test.py
3936
| tensorflow_addons/optimizers/weight_decay_optimizers.py
40-
| tensorflow_addons/optimizers/weight_decay_optimizers_test.py
41-
| tensorflow_addons/rnn/cell.py
42-
| tensorflow_addons/rnn/cell_test.py
4337
| tensorflow_addons/seq2seq/attention_wrapper_test.py
4438
| tensorflow_addons/text/__init__.py
4539
| tensorflow_addons/text/crf.py

tensorflow_addons/metrics/cohens_kappa_test.py

Lines changed: 16 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -22,21 +22,21 @@
2222
@test_utils.run_all_in_graph_and_eager_modes
2323
class CohenKappaTest(tf.test.TestCase):
2424
def test_config(self):
25-
kp_obj = CohenKappa(name='cohen_kappa', num_classes=5)
26-
self.assertEqual(kp_obj.name, 'cohen_kappa')
25+
kp_obj = CohenKappa(name="cohen_kappa", num_classes=5)
26+
self.assertEqual(kp_obj.name, "cohen_kappa")
2727
self.assertEqual(kp_obj.dtype, tf.float32)
2828
self.assertEqual(kp_obj.num_classes, 5)
2929

3030
# Check save and restore config
3131
kb_obj2 = CohenKappa.from_config(kp_obj.get_config())
32-
self.assertEqual(kb_obj2.name, 'cohen_kappa')
32+
self.assertEqual(kb_obj2.name, "cohen_kappa")
3333
self.assertEqual(kb_obj2.dtype, tf.float32)
3434
self.assertEqual(kp_obj.num_classes, 5)
3535

3636
def initialize_vars(self):
3737
kp_obj1 = CohenKappa(num_classes=5)
38-
kp_obj2 = CohenKappa(num_classes=5, weightage='linear')
39-
kp_obj3 = CohenKappa(num_classes=5, weightage='quadratic')
38+
kp_obj2 = CohenKappa(num_classes=5, weightage="linear")
39+
kp_obj3 = CohenKappa(num_classes=5, weightage="quadratic")
4040

4141
self.evaluate(tf.compat.v1.variables_initializer(kp_obj1.variables))
4242
self.evaluate(tf.compat.v1.variables_initializer(kp_obj2.variables))
@@ -78,8 +78,9 @@ def test_kappa_random_score(self):
7878
self.update_obj_states(kp_obj1, kp_obj2, kp_obj3, actuals, preds, None)
7979

8080
# Check results
81-
self.check_results([kp_obj1, kp_obj2, kp_obj3],
82-
[0.61904761, 0.62790697, 0.68932038])
81+
self.check_results(
82+
[kp_obj1, kp_obj2, kp_obj3], [0.61904761, 0.62790697, 0.68932038]
83+
)
8384

8485
def test_kappa_perfect_score(self):
8586
actuals = [4, 4, 3, 3, 2, 2, 1, 1]
@@ -109,8 +110,9 @@ def test_kappa_worse_than_random(self):
109110
self.update_obj_states(kp_obj1, kp_obj2, kp_obj3, actuals, preds, None)
110111

111112
# check results
112-
self.check_results([kp_obj1, kp_obj2, kp_obj3],
113-
[-0.3333333, -0.52380952, -0.72727272])
113+
self.check_results(
114+
[kp_obj1, kp_obj2, kp_obj3], [-0.3333333, -0.52380952, -0.72727272]
115+
)
114116

115117
def test_kappa_with_sample_weights(self):
116118
actuals = [4, 4, 3, 3, 2, 2, 1, 1]
@@ -124,12 +126,12 @@ def test_kappa_with_sample_weights(self):
124126
kp_obj1, kp_obj2, kp_obj3 = self.initialize_vars()
125127

126128
# Update
127-
self.update_obj_states(kp_obj1, kp_obj2, kp_obj3, actuals, preds,
128-
weights)
129+
self.update_obj_states(kp_obj1, kp_obj2, kp_obj3, actuals, preds, weights)
129130

130131
# check results
131-
self.check_results([kp_obj1, kp_obj2, kp_obj3],
132-
[-0.25473321, -0.38992332, -0.60695344])
132+
self.check_results(
133+
[kp_obj1, kp_obj2, kp_obj3], [-0.25473321, -0.38992332, -0.60695344]
134+
)
133135

134136
def test_kappa_reset_states(self):
135137
# Initialize
@@ -152,5 +154,5 @@ def test_large_values(self):
152154
self.assertAllClose(0.166666666, obj.result())
153155

154156

155-
if __name__ == '__main__':
157+
if __name__ == "__main__":
156158
tf.test.main()

tensorflow_addons/optimizers/lookahead_test.py

Lines changed: 30 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -33,10 +33,8 @@ def run_dense_sample(self, iterations, optimizer, seed=0x2019):
3333
var_0 = tf.Variable(val_0, dtype=tf.dtypes.float32)
3434
var_1 = tf.Variable(val_1, dtype=tf.dtypes.float32)
3535

36-
grad_0 = tf.constant(
37-
np.random.standard_normal((2,)), dtype=tf.dtypes.float32)
38-
grad_1 = tf.constant(
39-
np.random.standard_normal((2,)), dtype=tf.dtypes.float32)
36+
grad_0 = tf.constant(np.random.standard_normal((2,)), dtype=tf.dtypes.float32)
37+
grad_1 = tf.constant(np.random.standard_normal((2,)), dtype=tf.dtypes.float32)
4038

4139
grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1]))
4240

@@ -62,11 +60,15 @@ def run_sparse_sample(self, iterations, optimizer, seed=0x2019):
6260
var_1 = tf.Variable(val_1, dtype=tf.dtypes.float32)
6361

6462
grad_0 = tf.IndexedSlices(
65-
tf.constant([np.random.standard_normal()]), tf.constant([0]),
66-
tf.constant([2]))
63+
tf.constant([np.random.standard_normal()]),
64+
tf.constant([0]),
65+
tf.constant([2]),
66+
)
6767
grad_1 = tf.IndexedSlices(
68-
tf.constant([np.random.standard_normal()]), tf.constant([1]),
69-
tf.constant([2]))
68+
tf.constant([np.random.standard_normal()]),
69+
tf.constant([1]),
70+
tf.constant([2]),
71+
)
7072

7173
grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1]))
7274

@@ -84,10 +86,9 @@ def run_sparse_sample(self, iterations, optimizer, seed=0x2019):
8486
def test_dense_exact_ratio(self):
8587
for k in [5, 10, 100]:
8688
for alpha in [0.3, 0.7]:
87-
optimizer = tf.keras.optimizers.get('adam')
89+
optimizer = tf.keras.optimizers.get("adam")
8890
vals, quick_vars = self.run_dense_sample(k, optimizer)
89-
optimizer = Lookahead(
90-
'adam', sync_period=k, slow_step_size=alpha)
91+
optimizer = Lookahead("adam", sync_period=k, slow_step_size=alpha)
9192
_, slow_vars = self.run_dense_sample(k, optimizer)
9293
for val, quick, slow in zip(vals, quick_vars, slow_vars):
9394
expected = val + (quick - val) * alpha
@@ -96,10 +97,9 @@ def test_dense_exact_ratio(self):
9697
def test_sparse_exact_ratio(self):
9798
for k in [5, 10, 100]:
9899
for alpha in [0.3, 0.7]:
99-
optimizer = tf.keras.optimizers.get('adam')
100+
optimizer = tf.keras.optimizers.get("adam")
100101
vals, quick_vars = self.run_sparse_sample(k, optimizer)
101-
optimizer = Lookahead(
102-
'adam', sync_period=k, slow_step_size=alpha)
102+
optimizer = Lookahead("adam", sync_period=k, slow_step_size=alpha)
103103
_, slow_vars = self.run_sparse_sample(k, optimizer)
104104
for val, quick, slow in zip(vals, quick_vars, slow_vars):
105105
expected = val + (quick - val) * alpha
@@ -115,7 +115,7 @@ def test_fit_simple_linear_model(self):
115115

116116
model = tf.keras.models.Sequential()
117117
model.add(tf.keras.layers.Dense(input_shape=(3,), units=1))
118-
model.compile(Lookahead('adam'), loss='mse')
118+
model.compile(Lookahead("adam"), loss="mse")
119119

120120
model.fit(x, y, epochs=3)
121121

@@ -128,15 +128,18 @@ def test_fit_simple_linear_model(self):
128128

129129
def test_model_dynamic_lr(self):
130130
grad = tf.Variable([[0.1]])
131-
model = tf.keras.Sequential([
132-
tf.keras.layers.Dense(
133-
1,
134-
kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
135-
use_bias=False)
136-
])
131+
model = tf.keras.Sequential(
132+
[
133+
tf.keras.layers.Dense(
134+
1,
135+
kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
136+
use_bias=False,
137+
)
138+
]
139+
)
137140
model.build(input_shape=[1, 1])
138141

139-
opt = Lookahead('adam', sync_period=10, slow_step_size=0.4)
142+
opt = Lookahead("adam", sync_period=10, slow_step_size=0.4)
140143
update = opt.apply_gradients(list(zip([grad], model.variables)))
141144

142145
self.evaluate(tf.compat.v1.global_variables_initializer())
@@ -147,13 +150,12 @@ def test_model_dynamic_lr(self):
147150
self.assertAllClose(opt.lr.read_value(), 1e-4)
148151

149152
def test_get_config(self):
150-
opt = Lookahead('adam', sync_period=10, slow_step_size=0.4)
151-
opt = tf.keras.optimizers.deserialize(
152-
tf.keras.optimizers.serialize(opt))
153+
opt = Lookahead("adam", sync_period=10, slow_step_size=0.4)
154+
opt = tf.keras.optimizers.deserialize(tf.keras.optimizers.serialize(opt))
153155
config = opt.get_config()
154-
self.assertEqual(config['sync_period'], 10)
155-
self.assertEqual(config['slow_step_size'], 0.4)
156+
self.assertEqual(config["sync_period"], 10)
157+
self.assertEqual(config["slow_step_size"], 0.4)
156158

157159

158-
if __name__ == '__main__':
160+
if __name__ == "__main__":
159161
tf.test.main()

tensorflow_addons/optimizers/moving_average_test.py

Lines changed: 42 additions & 35 deletions
Original file line number | Diff line number | Diff line change
@@ -37,7 +37,8 @@ def test_run(self):
3737
opt = MovingAverage(
3838
tf.keras.optimizers.SGD(lr=2.0),
3939
sequential_update=sequential_update,
40-
average_decay=0.5)
40+
average_decay=0.5,
41+
)
4142

4243
if not tf.executing_eagerly():
4344
update = opt.apply_gradients(grads_and_vars)
@@ -51,8 +52,8 @@ def test_run(self):
5152
self.assertAllClose(var0.read_value(), [0.6, 1.6])
5253
self.assertAllClose(var1.read_value(), [2.96, 3.96])
5354

54-
ema_var0 = opt.get_slot(var0, 'average')
55-
ema_var1 = opt.get_slot(var1, 'average')
55+
ema_var0 = opt.get_slot(var0, "average")
56+
ema_var1 = opt.get_slot(var1, "average")
5657

5758
if sequential_update:
5859
self.assertAllClose(ema_var0.read_value(), [0.75, 1.75])
@@ -65,12 +66,14 @@ def test_run(self):
6566
self.assertAllClose(var0.read_value(), [0.75, 1.75])
6667
self.assertAllClose(var1.read_value(), [2.975, 3.975])
6768

68-
perturb = tf.group([
69-
var0.assign_add([1.0, 1.0]),
70-
var1.assign_add([2.0, 2.0]),
71-
ema_var0.assign_add([3.0, 3.0]),
72-
ema_var1.assign_add([4.0, 4.0])
73-
])
69+
perturb = tf.group(
70+
[
71+
var0.assign_add([1.0, 1.0]),
72+
var1.assign_add([2.0, 2.0]),
73+
ema_var0.assign_add([3.0, 3.0]),
74+
ema_var1.assign_add([4.0, 4.0]),
75+
]
76+
)
7477
self.evaluate(perturb)
7578

7679
if sequential_update:
@@ -87,12 +90,15 @@ def test_opt_failure(self):
8790

8891
def test_model_weights_update(self):
8992
grad = tf.Variable([[0.1]])
90-
model = tf.keras.Sequential([
91-
tf.keras.layers.Dense(
92-
1,
93-
kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
94-
use_bias=False)
95-
])
93+
model = tf.keras.Sequential(
94+
[
95+
tf.keras.layers.Dense(
96+
1,
97+
kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
98+
use_bias=False,
99+
)
100+
]
101+
)
96102
model.build(input_shape=[1, 1])
97103
self.evaluate(tf.compat.v1.global_variables_initializer())
98104

@@ -109,17 +115,19 @@ def test_model_weights_update(self):
109115

110116
def test_model_dynamic_lr(self):
111117
grad = tf.Variable([[0.1]])
112-
model = tf.keras.Sequential([
113-
tf.keras.layers.Dense(
114-
1,
115-
kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
116-
use_bias=False)
117-
])
118+
model = tf.keras.Sequential(
119+
[
120+
tf.keras.layers.Dense(
121+
1,
122+
kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
123+
use_bias=False,
124+
)
125+
]
126+
)
118127
model.build(input_shape=[1, 1])
119128
self.evaluate(tf.compat.v1.global_variables_initializer())
120129

121-
opt = MovingAverage(
122-
tf.keras.optimizers.SGD(lr=1e-3), average_decay=0.5)
130+
opt = MovingAverage(tf.keras.optimizers.SGD(lr=1e-3), average_decay=0.5)
123131
update = opt.apply_gradients(list(zip([grad], model.variables)))
124132

125133
self.evaluate(tf.compat.v1.global_variables_initializer())
@@ -130,21 +138,20 @@ def test_model_dynamic_lr(self):
130138
self.assertAllClose(opt.lr.read_value(), 1e-4)
131139

132140
def test_optimizer_string(self):
133-
_ = MovingAverage('adam')
141+
_ = MovingAverage("adam")
134142

135143
def test_config(self):
136144
sgd_opt = tf.keras.optimizers.SGD(
137-
lr=2.0, nesterov=True, momentum=0.3, decay=0.1)
145+
lr=2.0, nesterov=True, momentum=0.3, decay=0.1
146+
)
138147
opt = MovingAverage(
139-
sgd_opt,
140-
average_decay=0.5,
141-
num_updates=None,
142-
sequential_update=False)
148+
sgd_opt, average_decay=0.5, num_updates=None, sequential_update=False
149+
)
143150
config = opt.get_config()
144151

145-
self.assertEqual(config['average_decay'], 0.5)
146-
self.assertEqual(config['num_updates'], None)
147-
self.assertEqual(config['sequential_update'], False)
152+
self.assertEqual(config["average_decay"], 0.5)
153+
self.assertEqual(config["num_updates"], None)
154+
self.assertEqual(config["sequential_update"], False)
148155

149156
new_opt = MovingAverage.from_config(config)
150157
old_sgd_config = opt._optimizer.get_config()
@@ -166,8 +173,8 @@ def test_fit_simple_linear_model(self):
166173
model.add(tf.keras.layers.Dense(input_shape=(3,), units=1))
167174
self.evaluate(tf.compat.v1.global_variables_initializer())
168175

169-
opt = MovingAverage('adam')
170-
model.compile(opt, loss='mse')
176+
opt = MovingAverage("adam")
177+
model.compile(opt, loss="mse")
171178

172179
model.fit(x, y, epochs=10)
173180
opt.assign_average_vars(model.variables)
@@ -181,5 +188,5 @@ def test_fit_simple_linear_model(self):
181188
self.assertLess(max_abs_diff, 1e-3)
182189

183190

184-
if __name__ == '__main__':
191+
if __name__ == "__main__":
185192
tf.test.main()

0 commit comments

Comments (0)