 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Tests for LazyAdamOptimizer."""
+"""Tests for LazyAdam."""
 
 from __future__ import absolute_import
 from __future__ import division
 
 from tensorflow.python.eager import context
 from tensorflow.python.ops import variables
-from tensorflow_addons.optimizers import lazy_adam_optimizer
+from tensorflow_addons.optimizers import lazy_adam
 from tensorflow_addons.utils import test_utils
 
 
@@ -54,7 +54,7 @@ def get_beta_accumulators(opt, dtype):
     return (beta_1_power, beta_2_power)
 
 
-class LazyAdamOptimizerTest(tf.test.TestCase):
+class LazyAdamTest(tf.test.TestCase):
 
     # TODO: remove v1 tests (keep pace with adam_test.py in keras).
     @test_utils.run_deprecated_v1
@@ -80,7 +80,7 @@ def testSparse(self):
                 grads1 = tf.IndexedSlices(
                     tf.constant(grads1_np[grads1_np_indices]),
                     tf.constant(grads1_np_indices), tf.constant([3]))
-                opt = lazy_adam_optimizer.LazyAdamOptimizer()
+                opt = lazy_adam.LazyAdam()
                 update = opt.apply_gradients(
                     zip([grads0, grads1], [var0, var1]))
                 self.evaluate(variables.global_variables_initializer())
@@ -118,7 +118,7 @@ def testSparseDevicePlacement(self):
                 var = tf.Variable([[1.0], [2.0]])
                 indices = tf.constant([0, 1], dtype=index_dtype)
                 g_sum = lambda: tf.math.reduce_sum(tf.gather(var, indices))  # pylint: disable=cell-var-from-loop
-                optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
+                optimizer = lazy_adam.LazyAdam(3.0)
                 minimize_op = optimizer.minimize(g_sum, var_list=[var])
                 self.evaluate(variables.global_variables_initializer())
                 self.evaluate(minimize_op)
@@ -137,10 +137,10 @@ def testSparseRepeatedIndices(self):
                 grad_aggregated = tf.IndexedSlices(
                     tf.constant([0.2], shape=[1, 1], dtype=dtype),
                     tf.constant([1]), tf.constant([2, 1]))
-                repeated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
+                repeated_update_opt = lazy_adam.LazyAdam()
                 repeated_update = repeated_update_opt.apply_gradients(
                     [(grad_repeated_index, repeated_index_update_var)])
-                aggregated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
+                aggregated_update_opt = lazy_adam.LazyAdam()
                 aggregated_update = aggregated_update_opt.apply_gradients(
                     [(grad_aggregated, aggregated_update_var)])
                 self.evaluate(variables.global_variables_initializer())
@@ -181,8 +181,7 @@ def doTestBasic(self, use_callable_params=False):
                     beta2 = beta2()
                     epsilon = epsilon()
 
-                opt = lazy_adam_optimizer.LazyAdamOptimizer(
-                    learning_rate=learning_rate)
+                opt = lazy_adam.LazyAdam(learning_rate=learning_rate)
                 if not context.executing_eagerly():
                     update = opt.apply_gradients(
                         zip([grads0, grads1], [var0, var1]))
@@ -241,7 +240,7 @@ def testTensorLearningRate(self):
                 var1 = tf.Variable(var1_np)
                 grads0 = tf.constant(grads0_np)
                 grads1 = tf.constant(grads1_np)
-                opt = lazy_adam_optimizer.LazyAdamOptimizer(tf.constant(0.001))
+                opt = lazy_adam.LazyAdam(tf.constant(0.001))
                 update = opt.apply_gradients(
                     zip([grads0, grads1], [var0, var1]))
                 self.evaluate(variables.global_variables_initializer())
@@ -285,7 +284,7 @@ def testSharing(self):
                 var1 = tf.Variable(var1_np)
                 grads0 = tf.constant(grads0_np)
                 grads1 = tf.constant(grads1_np)
-                opt = lazy_adam_optimizer.LazyAdamOptimizer()
+                opt = lazy_adam.LazyAdam()
                 update1 = opt.apply_gradients(
                     zip([grads0, grads1], [var0, var1]))
                 update2 = opt.apply_gradients(
@@ -324,7 +323,7 @@ def testSlotsUniqueEager(self):
         with context.eager_mode():
             v1 = tf.Variable(1.)
             v2 = tf.Variable(1.)
-            opt = lazy_adam_optimizer.LazyAdamOptimizer(1.)
+            opt = lazy_adam.LazyAdam(1.)
             opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
             # There should be iteration, and two unique slot variables for v1 and v2.
             self.assertEqual(5, len(set(opt.variables())))
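
After this rename, the optimizer is exposed from tensorflow_addons.optimizers as LazyAdam rather than LazyAdamOptimizer. As a minimal sketch (not part of this test file, and assuming the tensorflow_addons package is installed and exports the class at the tfa.optimizers namespace), typical downstream usage would look like:

import tensorflow as tf
import tensorflow_addons as tfa

# Compile a small Keras model with the renamed LazyAdam optimizer.
# LazyAdam is a variant of Adam intended to handle sparse gradients
# (e.g. from embedding lookups) more efficiently by updating only the
# accumulator slices that the current gradient actually touches.
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=1000, output_dim=16),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer=tfa.optimizers.LazyAdam(learning_rate=0.001),
              loss="mse")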