24 | 24 | from tensorflow_addons.activations import rrelu |
25 | 25 | from tensorflow_addons.utils import test_utils |
26 | 26 |
27 | | - |
28 | | -def _ref_rrelu(x, lower, upper): |
29 | | -    return tf.where(x >= 0, x, (lower + upper) * x / 2) |
| 27 | +SEED = 111111 |
30 | 28 |
31 | 29 |
32 | 30 | @test_utils.run_all_in_graph_and_eager_modes |
33 | 31 | class RreluTest(tf.test.TestCase, parameterized.TestCase): |
34 | 32 |     @parameterized.named_parameters(("float16", np.float16), |
35 | 33 |                                     ("float32", np.float32), |
36 | 34 |                                     ("float64", np.float64)) |
37 | | -    @tf.function |
38 | 35 |     def test_rrelu(self, dtype): |
39 | 36 |         x = tf.constant([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=dtype) |
40 | 37 |         lower = 0.1 |
41 | 38 |         upper = 0.2 |
42 | | -        result = rrelu(x, lower, upper, training=False) |
43 | | -        expect_result = _ref_rrelu(x, lower, upper) |
44 | | -        self.assertAllCloseAccordingToType(result, expect_result) |
| 39 | + |
| 40 | +        training_results = { |
| 41 | +            np.float16: [-0.288330078, -0.124206543, 0, 1, 2], |
| 42 | +            np.float32: [-0.26851666, -0.116421416, 0, 1, 2], |
| 43 | +            np.float64: [-0.3481333923206531, -0.17150176242558851, 0, 1, 2], |
| 44 | +        } |
| 45 | +        for training in [True, False]: |
| 46 | +            with self.subTest(training=training): |
| 47 | +                tf.random.set_seed(SEED) |
| 48 | +                result = rrelu(x, lower, upper, training=training, seed=SEED) |
| 49 | +                if training: |
| 50 | +                    expect_result = training_results.get(dtype) |
| 51 | +                else: |
| 52 | +                    expect_result = [ |
| 53 | +                        -0.30000001192092896, -0.15000000596046448, 0, 1, 2 |
| 54 | +                    ] |
| 55 | +                self.assertAllCloseAccordingToType(result, expect_result) |
45 | 56 |
46 | 57 |     @parameterized.named_parameters(("float32", np.float32), |
47 | 58 |                                     ("float64", np.float64)) |
48 | 59 |     def test_theoretical_gradients(self, dtype): |
49 | | -        x = tf.constant([-2.0, -1.0, -0.1, 0.1, 1.0, 2.0], dtype=dtype) |
50 | | -        lower = 0.1 |
51 | | -        upper = 0.2 |
52 | | -        for training in [True, False]: |
53 | | -            with self.subTest(training=training): |
54 | | -                theoretical, numerical = tf.test.compute_gradient( |
55 | | -                    lambda x: rrelu( |
56 | | -                        x, lower, upper, training=training, seed=111111), [x]) |
57 | | -                # TODO: investigate the difference between CPU and GPU |
58 | | -                if training is True and tf.test.is_gpu_available() is False: |
59 | | -                    numerical = [[[0.134971, 0., 0., 0., 0., 0.], |
60 | | -                                  [0., 0.15648358, 0., 0., 0., 0.], |
61 | | -                                  [0., 0., 0.18776372, 0., 0., 0.], |
62 | | -                                  [0., 0., 0., 1., 0., 0.], |
63 | | -                                  [0., 0., 0., 0., 1., 0.], |
64 | | -                                  [0., 0., 0., 0., 0., 1.]]] |
65 | | -                self.assertAllCloseAccordingToType( |
66 | | -                    theoretical, numerical, rtol=5e-4, atol=5e-4) |
67 | | - |
68 | | -    def test_unknown_shape(self): |
69 | | -        fn = rrelu.get_concrete_function( |
70 | | -            tf.TensorSpec(shape=None, dtype=tf.float32)) |
71 | | - |
72 | | -        for shape in [(1,), (1, 2), (1, 2, 3), (1, 2, 3, 4)]: |
73 | | -            x = tf.ones(shape=shape, dtype=tf.float32) |
74 | | -            self.assertAllClose(fn(x), rrelu(x)) |
| 60 | +        if tf.executing_eagerly(): |
| 61 | +
| 62 | +            def rrelu_wrapper(lower, upper, training): |
| 63 | +                def inner(x): |
| 64 | +                    tf.random.set_seed(SEED) |
| 65 | +                    return rrelu(x, lower, upper, training=training, seed=SEED) |
| 66 | +
| 67 | +                return inner |
| 68 | +
| 69 | +            x = tf.constant([-2.0, -1.0, -0.1, 0.1, 1.0, 2.0], dtype=dtype) |
| 70 | +            lower = 0.1 |
| 71 | +            upper = 0.2 |
| 72 | +
| 73 | +            for training in [True, False]: |
| 74 | +                with self.subTest(training=training): |
| 75 | +                    theoretical, numerical = tf.test.compute_gradient( |
| 76 | +                        rrelu_wrapper(lower, upper, training), [x]) |
| 77 | +                    self.assertAllCloseAccordingToType( |
| 78 | +                        theoretical, numerical, rtol=5e-4, atol=5e-4) |
| 79 | + |
| 80 | + |
| 81 | +class RreluBenchmarks(tf.test.Benchmark): |
| 82 | +    def benchmarkRreluOp(self): |
| 83 | +        with tf.compat.v1.Session(config=tf.test.benchmark_config()) as sess: |
| 84 | +            x = tf.constant([-2.0, -1.0, 0.0, 1.0, 2.0], dtype=np.float32) |
| 85 | +            lower = 0.1 |
| 86 | +            upper = 0.2 |
| 87 | +            result = rrelu(x, lower, upper, training=True) |
| 88 | +            self.run_op_benchmark(sess, result.op, min_iters=25) |
75 | 89 |
76 | 90 |
77 | 91 | if __name__ == "__main__": |
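
Note on the hard-coded expectations: with `training=False`, RReLU is deterministic and scales negative inputs by the mean slope `(lower + upper) / 2`, which is exactly what the removed `_ref_rrelu` helper computed; with `training=True`, the slope for each negative element is drawn uniformly from `[lower, upper]`, which is why the test pins a seed and keeps per-dtype expected values. A minimal NumPy sketch (an illustration only, not the tensorflow_addons kernel) that reproduces the `training=False` expectations for `lower=0.1`, `upper=0.2`:

import numpy as np

def rrelu_inference(x, lower=0.1, upper=0.2):
    # Deterministic RReLU used when training=False: negative inputs are
    # scaled by the mean slope (lower + upper) / 2, mirroring the removed
    # _ref_rrelu helper above. Illustrative sketch, not the actual kernel.
    x = np.asarray(x, dtype=np.float64)
    return np.where(x >= 0, x, x * (lower + upper) / 2)

print(rrelu_inference([-2.0, -1.0, 0.0, 1.0, 2.0]))
# [-0.3  -0.15  0.    1.    2.  ]  -- matches expect_result for training=False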