Commit 63bb4b4

Squadrick authored and seanpmorgan committed
Rename LazyAdamOptimizer to LazyAdam (#109)
* Rename LazyAdamOptimizer to LazyAdam
1 parent 8dad6d2 commit 63bb4b4
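
For downstream users, the rename is purely mechanical: the optimizer's behavior and constructor signature are unchanged, only the class and module names move. A minimal before/after sketch (assuming tensorflow and tensorflow-addons are installed at versions spanning this change):

import tensorflow_addons as tfa

# Before this commit:
#   opt = tfa.optimizers.LazyAdamOptimizer(0.001)

# After this commit:
opt = tfa.optimizers.LazyAdam(0.001)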

6 files changed, +21 -22 lines changed

tensorflow_addons/examples/tfa_optimizer.py

Lines changed: 2 additions & 2 deletions

@@ -65,11 +65,11 @@ def generate_data(num_validation):
 
 
 def train_and_eval():
-  """Train and evalute simple MNIST model using LazyAdamOptimizer."""
+  """Train and evalute simple MNIST model using LazyAdam."""
   data = generate_data(num_validation=VALIDATION_SAMPLES)
   dense_net = build_mnist_model()
   dense_net.compile(
-      optimizer=tfa.optimizers.LazyAdamOptimizer(0.001),
+      optimizer=tfa.optimizers.LazyAdam(0.001),
       loss=tf.keras.losses.SparseCategoricalCrossentropy(),
       metrics=['accuracy'])
 
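Mirroring the example above, a self-contained sketch of the updated usage might look like this (the model architecture here is illustrative, standing in for the example's build_mnist_model(), and is not part of this commit):

import tensorflow as tf
import tensorflow_addons as tfa

# A toy dense model standing in for build_mnist_model().
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(
    optimizer=tfa.optimizers.LazyAdam(0.001),  # renamed from LazyAdamOptimizer
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    metrics=['accuracy'])
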
tensorflow_addons/optimizers/BUILD

Lines changed: 4 additions & 4 deletions

@@ -6,7 +6,7 @@ py_library(
     name = "optimizers",
     srcs = [
         "__init__.py",
-        "lazy_adam_optimizer.py",
+        "lazy_adam.py",
     ],
     srcs_version = "PY2AND3",
     deps = [
@@ -15,12 +15,12 @@ py_library(
 )
 
 py_test(
-    name = "lazy_adam_optimizer_test",
+    name = "lazy_adam_test",
     size = "small",
     srcs = [
-        "lazy_adam_optimizer_test.py",
+        "lazy_adam_test.py",
     ],
-    main = "lazy_adam_optimizer_test.py",
+    main = "lazy_adam_test.py",
     srcs_version = "PY2AND3",
     deps = [
         ":optimizers",

tensorflow_addons/optimizers/README.md

Lines changed: 2 additions & 2 deletions

@@ -3,12 +3,12 @@
 ## Maintainers
 | Submodule | Maintainers | Contact Info |
 |:---------- |:------------- |:--------------|
-| lazy_adam_optimizer | SIG-Addons | addons@tensorflow.org |
+| lazy_adam | SIG-Addons | addons@tensorflow.org |
 
 ## Components
 | Submodule | Optimizer | Reference |
 |:----------------------- |:---------------------- |:---------|
-| lazy_adam_optimizer | LazyAdamOptimizer | https://arxiv.org/abs/1412.6980 |
+| lazy_adam | LazyAdam | https://arxiv.org/abs/1412.6980 |
 
 
 ## Contribution Guidelines

tensorflow_addons/optimizers/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -18,4 +18,4 @@
 from __future__ import division
 from __future__ import print_function
 
-from tensorflow_addons.optimizers.lazy_adam_optimizer import LazyAdamOptimizer
+from tensorflow_addons.optimizers.lazy_adam import LazyAdam
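
Because __init__.py re-exports the class, both the package-level and module-level import paths work after this change; a short sketch of the two equivalent forms:

# Package-level import, via the re-export above:
from tensorflow_addons.optimizers import LazyAdam

# Module-level import, matching the renamed file:
from tensorflow_addons.optimizers.lazy_adam import LazyAdam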

tensorflow_addons/optimizers/lazy_adam_optimizer.py renamed to tensorflow_addons/optimizers/lazy_adam.py

Lines changed: 1 addition & 1 deletion

@@ -29,7 +29,7 @@
 
 
 @keras_utils.register_keras_custom_object
-class LazyAdamOptimizer(tf.keras.optimizers.Adam):
+class LazyAdam(tf.keras.optimizers.Adam):
   """Variant of the Adam optimizer that handles sparse updates more
   efficiently.
 
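Since LazyAdam subclasses tf.keras.optimizers.Adam, it accepts Adam's standard constructor arguments; only the sparse-update path differs. A minimal sketch (hyperparameter values are illustrative, not from this commit):

import tensorflow_addons as tfa

opt = tfa.optimizers.LazyAdam(
    learning_rate=0.001,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-7)
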
tensorflow_addons/optimizers/lazy_adam_optimizer_test.py renamed to tensorflow_addons/optimizers/lazy_adam_test.py

Lines changed: 11 additions & 12 deletions

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Tests for LazyAdamOptimizer."""
+"""Tests for LazyAdam."""
 
 from __future__ import absolute_import
 from __future__ import division
@@ -23,7 +23,7 @@
 
 from tensorflow.python.eager import context
 from tensorflow.python.ops import variables
-from tensorflow_addons.optimizers import lazy_adam_optimizer
+from tensorflow_addons.optimizers import lazy_adam
 from tensorflow_addons.utils import test_utils
 
 
@@ -54,7 +54,7 @@ def get_beta_accumulators(opt, dtype):
   return (beta_1_power, beta_2_power)
 
 
-class LazyAdamOptimizerTest(tf.test.TestCase):
+class LazyAdamTest(tf.test.TestCase):
 
   # TODO: remove v1 tests (keep pace with adam_test.py in keras).
   @test_utils.run_deprecated_v1
@@ -80,7 +80,7 @@ def testSparse(self):
       grads1 = tf.IndexedSlices(
           tf.constant(grads1_np[grads1_np_indices]),
           tf.constant(grads1_np_indices), tf.constant([3]))
-      opt = lazy_adam_optimizer.LazyAdamOptimizer()
+      opt = lazy_adam.LazyAdam()
       update = opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]))
       self.evaluate(variables.global_variables_initializer())
@@ -118,7 +118,7 @@ def testSparseDevicePlacement(self):
       var = tf.Variable([[1.0], [2.0]])
       indices = tf.constant([0, 1], dtype=index_dtype)
       g_sum = lambda: tf.math.reduce_sum(tf.gather(var, indices))  # pylint: disable=cell-var-from-loop
-      optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
+      optimizer = lazy_adam.LazyAdam(3.0)
       minimize_op = optimizer.minimize(g_sum, var_list=[var])
       self.evaluate(variables.global_variables_initializer())
       self.evaluate(minimize_op)
@@ -137,10 +137,10 @@ def testSparseRepeatedIndices(self):
       grad_aggregated = tf.IndexedSlices(
          tf.constant([0.2], shape=[1, 1], dtype=dtype),
          tf.constant([1]), tf.constant([2, 1]))
-      repeated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
+      repeated_update_opt = lazy_adam.LazyAdam()
       repeated_update = repeated_update_opt.apply_gradients(
           [(grad_repeated_index, repeated_index_update_var)])
-      aggregated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
+      aggregated_update_opt = lazy_adam.LazyAdam()
       aggregated_update = aggregated_update_opt.apply_gradients(
           [(grad_aggregated, aggregated_update_var)])
       self.evaluate(variables.global_variables_initializer())
@@ -181,8 +181,7 @@ def doTestBasic(self, use_callable_params=False):
        beta2 = beta2()
        epsilon = epsilon()
 
-      opt = lazy_adam_optimizer.LazyAdamOptimizer(
-          learning_rate=learning_rate)
+      opt = lazy_adam.LazyAdam(learning_rate=learning_rate)
       if not context.executing_eagerly():
         update = opt.apply_gradients(
             zip([grads0, grads1], [var0, var1]))
@@ -241,7 +240,7 @@ def testTensorLearningRate(self):
       var1 = tf.Variable(var1_np)
       grads0 = tf.constant(grads0_np)
       grads1 = tf.constant(grads1_np)
-      opt = lazy_adam_optimizer.LazyAdamOptimizer(tf.constant(0.001))
+      opt = lazy_adam.LazyAdam(tf.constant(0.001))
       update = opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]))
       self.evaluate(variables.global_variables_initializer())
@@ -285,7 +284,7 @@ def testSharing(self):
       var1 = tf.Variable(var1_np)
       grads0 = tf.constant(grads0_np)
       grads1 = tf.constant(grads1_np)
-      opt = lazy_adam_optimizer.LazyAdamOptimizer()
+      opt = lazy_adam.LazyAdam()
       update1 = opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]))
       update2 = opt.apply_gradients(
@@ -324,7 +323,7 @@ def testSlotsUniqueEager(self):
     with context.eager_mode():
       v1 = tf.Variable(1.)
       v2 = tf.Variable(1.)
-      opt = lazy_adam_optimizer.LazyAdamOptimizer(1.)
+      opt = lazy_adam.LazyAdam(1.)
       opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
       # There should be iteration, and two unique slot variables for v1 and v2.
       self.assertEqual(5, len(set(opt.variables())))
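
The final assertion above counts optimizer-owned variables: Adam-style optimizers keep one iteration counter plus an (m, v) slot pair per trained variable, so two variables yield 1 + 2 x 2 = 5. A standalone sketch of the same check (assuming eager execution, the TF2 default):

import tensorflow as tf
import tensorflow_addons as tfa

v1 = tf.Variable(1.)
v2 = tf.Variable(1.)
opt = tfa.optimizers.LazyAdam(1.)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])

# One iteration counter + (m, v) slots for each of v1 and v2.
assert len(set(opt.variables())) == 5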
