Skip to content

Commit f4db0b1

Browse files
Tomer Kaftan and facaiy
authored and committed
Add Addons namespace to Custom Ops (#516)
* Namespaced all of the custom ops * Updated C++ namespaces to not conflict w/ TF contrib ones * Ran code reformatting tool * Port bug fix in TF contrib to addons. (#497) * Port bug fix in TF contrib to addons. Original change at tensorflow/tensorflow@a913689. * Fix lint warning. * check pass through and do the expand_dims() only if needed (#464) * check pass through and do the expand_dims() only if needed * add indent to the fixed line * merge return condition to if state * add hardshrink kernel (#500) * add hardshrink kernel * make linter happy * Fixing SequenceLoss Keras incompatibility (#503) * Fix SequenceLoss incompatibility with Keras built-in loops * Remove debugging prints * Change the attribute existence checking to use more pythonic way * Replace some compat.v1 APIs by their v2 equivalent (#507) * Replace some compat.v1 APIs by their v2 equivalent * Fix lint error * Add documentation for LazyAdam (#515) * Updated hardshrink custom ops & made #ifdef names more consistent. * Fix to undef
1 parent fd51dc7 commit f4db0b1

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

43 files changed

+213
-144
lines changed

tensorflow_addons/activations/gelu.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,10 +46,10 @@ def gelu(x, approximate=True):
4646
A `Tensor`. Has the same type as `x`.
4747
"""
4848
x = tf.convert_to_tensor(x)
49-
return _activation_ops_so.gelu(x, approximate)
49+
return _activation_ops_so.addons_gelu(x, approximate)
5050

5151

52-
@tf.RegisterGradient("Gelu")
52+
@tf.RegisterGradient("Addons>Gelu")
5353
def _gelu_grad(op, grad):
54-
return _activation_ops_so.gelu_grad(grad, op.inputs[0],
55-
op.get_attr("approximate"))
54+
return _activation_ops_so.addons_gelu_grad(grad, op.inputs[0],
55+
op.get_attr("approximate"))

tensorflow_addons/activations/hardshrink.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -42,11 +42,11 @@ def hardshrink(x, lower=-1.0, upper=1.0):
4242
A `Tensor`. Has the same type as `x`.
4343
"""
4444
x = tf.convert_to_tensor(x)
45-
return _activation_ops_so.hardshrink(x, lower, upper)
45+
return _activation_ops_so.addons_hardshrink(x, lower, upper)
4646

4747

48-
@tf.RegisterGradient("Hardshrink")
48+
@tf.RegisterGradient("Addons>Hardshrink")
4949
def _hardshrink_grad(op, grad):
50-
return _activation_ops_so.hardshrink_grad(grad, op.inputs[0],
51-
op.get_attr("lower"),
52-
op.get_attr("upper"))
50+
return _activation_ops_so.addons_hardshrink_grad(grad, op.inputs[0],
51+
op.get_attr("lower"),
52+
op.get_attr("upper"))

tensorflow_addons/custom_ops/activations/cc/kernels/gelu_op.cc

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -21,15 +21,16 @@ limitations under the License.
2121
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
2222

2323
namespace tensorflow {
24+
namespace addons {
2425

2526
using CPUDevice = Eigen::ThreadPoolDevice;
2627

27-
#define REGISTER_GELU_KERNELS(type) \
28-
REGISTER_KERNEL_BUILDER( \
29-
Name("Gelu").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
30-
GeluOp<CPUDevice, type>); \
31-
REGISTER_KERNEL_BUILDER( \
32-
Name("GeluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
28+
#define REGISTER_GELU_KERNELS(type) \
29+
REGISTER_KERNEL_BUILDER( \
30+
Name("Addons>Gelu").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
31+
GeluOp<CPUDevice, type>); \
32+
REGISTER_KERNEL_BUILDER( \
33+
Name("Addons>GeluGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
3334
GeluGradOp<CPUDevice, type>);
3435

3536
// Gelu only makes sense with floating points.
@@ -61,17 +62,18 @@ TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC);
6162
} // namespace functor
6263

6364
// Registration of the GPU implementations.
64-
#define REGISTER_GELU_GPU_KERNELS(type) \
65-
REGISTER_KERNEL_BUILDER( \
66-
Name("Gelu").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
67-
GeluOp<GPUDevice, type>); \
68-
REGISTER_KERNEL_BUILDER( \
69-
Name("GeluGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
65+
#define REGISTER_GELU_GPU_KERNELS(type) \
66+
REGISTER_KERNEL_BUILDER( \
67+
Name("Addons>Gelu").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
68+
GeluOp<GPUDevice, type>); \
69+
REGISTER_KERNEL_BUILDER( \
70+
Name("Addons>GeluGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
7071
GeluGradOp<GPUDevice, type>);
7172

7273
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GELU_GPU_KERNELS);
7374
#undef REGISTER_GELU_GPU_KERNELS
7475

7576
#endif // GOOGLE_CUDA
7677

77-
} // namespace tensorflow
78+
} // end namespace addons
79+
} // namespace tensorflow

tensorflow_addons/custom_ops/activations/cc/kernels/gelu_op.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
1313
limitations under the License.
1414
==============================================================================*/
1515

16-
#ifndef TENSORFLOW_ADDONS_GELU_OP_H_
17-
#define TENSORFLOW_ADDONS_GELU_OP_H_
16+
#ifndef TENSORFLOW_ADDONS_ACTIVATIONS_KERNELS_GELU_OP_H_
17+
#define TENSORFLOW_ADDONS_ACTIVATIONS_KERNELS_GELU_OP_H_
1818

1919
#define EIGEN_USE_THREADS
2020

@@ -23,6 +23,7 @@ limitations under the License.
2323
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
2424

2525
namespace tensorflow {
26+
namespace addons {
2627
namespace functor {
2728

2829
// Functor used by GeluOp to do the computations.
@@ -137,8 +138,9 @@ void GeluGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,
137138
approximate, output->flat<T>());
138139
}
139140

141+
} // end namespace addons
140142
} // namespace tensorflow
141143

142144
#undef EIGEN_USE_THREADS
143145

144-
#endif // TENSORFLOW_ADDONS_GELU_OP_H_
146+
#endif // TENSORFLOW_ADDONS_ACTIVATIONS_KERNELS_GELU_OP_H_

tensorflow_addons/custom_ops/activations/cc/kernels/gelu_op_gpu.cu.cc

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ limitations under the License.
2222
#include "third_party/eigen3/Eigen/Core"
2323

2424
namespace tensorflow {
25+
namespace addons {
2526

2627
using GPUDevice = Eigen::GpuDevice;
2728

@@ -31,6 +32,7 @@ using GPUDevice = Eigen::GpuDevice;
3132

3233
TF_CALL_GPU_NUMBER_TYPES(DEFINE_GPU_KERNELS);
3334

35+
} // end namespace addons
3436
} // namespace tensorflow
3537

3638
#endif // GOOGLE_CUDA

tensorflow_addons/custom_ops/activations/cc/kernels/hardshrink_op.cc

Lines changed: 18 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -21,16 +21,18 @@ limitations under the License.
2121
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
2222

2323
namespace tensorflow {
24+
namespace addons {
2425

2526
using CPUDevice = Eigen::ThreadPoolDevice;
2627

27-
#define REGISTER_HARDSHRINK_KERNELS(type) \
28-
REGISTER_KERNEL_BUILDER( \
29-
Name("Hardshrink").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
30-
HardshrinkOp<CPUDevice, type>); \
31-
REGISTER_KERNEL_BUILDER( \
32-
Name("HardshrinkGrad").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
33-
HardshrinkGradOp<CPUDevice, type>);
28+
#define REGISTER_HARDSHRINK_KERNELS(type) \
29+
REGISTER_KERNEL_BUILDER( \
30+
Name("Addons>Hardshrink").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
31+
HardshrinkOp<CPUDevice, type>); \
32+
REGISTER_KERNEL_BUILDER(Name("Addons>HardshrinkGrad") \
33+
.Device(DEVICE_CPU) \
34+
.TypeConstraint<type>("T"), \
35+
HardshrinkGradOp<CPUDevice, type>);
3436

3537
// Hardshrink only makes sense with floating points.
3638
TF_CALL_GPU_NUMBER_TYPES(REGISTER_HARDSHRINK_KERNELS);
@@ -61,17 +63,19 @@ TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC);
6163
} // namespace functor
6264

6365
// Registration of the GPU implementations.
64-
#define REGISTER_HARDSHRINK_GPU_KERNELS(type) \
65-
REGISTER_KERNEL_BUILDER( \
66-
Name("Hardshrink").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
67-
HardshrinkOp<GPUDevice, type>); \
68-
REGISTER_KERNEL_BUILDER( \
69-
Name("HardshrinkGrad").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
70-
HardshrinkGradOp<GPUDevice, type>);
66+
#define REGISTER_HARDSHRINK_GPU_KERNELS(type) \
67+
REGISTER_KERNEL_BUILDER( \
68+
Name("Addons>Hardshrink").Device(DEVICE_GPU).TypeConstraint<type>("T"), \
69+
HardshrinkOp<GPUDevice, type>); \
70+
REGISTER_KERNEL_BUILDER(Name("Addons>HardshrinkGrad") \
71+
.Device(DEVICE_GPU) \
72+
.TypeConstraint<type>("T"), \
73+
HardshrinkGradOp<GPUDevice, type>);
7174

7275
TF_CALL_GPU_NUMBER_TYPES(REGISTER_HARDSHRINK_GPU_KERNELS);
7376
#undef REGISTER_HARDSHRINK_GPU_KERNELS
7477

7578
#endif // GOOGLE_CUDA
7679

80+
} // end namespace addons
7781
} // namespace tensorflow

tensorflow_addons/custom_ops/activations/cc/kernels/hardshrink_op.h

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
1313
limitations under the License.
1414
==============================================================================*/
1515

16-
#ifndef TENSORFLOW_ADDONS_HARDSHRINK_OP_H_
17-
#define TENSORFLOW_ADDONS_HARDSHRINK_OP_H_
16+
#ifndef TENSORFLOW_ADDONS_ACTIVATIONS_KERNELS_HARDSHRINK_OP_H_
17+
#define TENSORFLOW_ADDONS_ACTIVATIONS_KERNELS_HARDSHRINK_OP_H_
1818

1919
#define EIGEN_USE_THREADS
2020

@@ -24,6 +24,8 @@ limitations under the License.
2424
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
2525

2626
namespace tensorflow {
27+
namespace addons {
28+
2729
namespace functor {
2830

2931
// Functor used by HardshrinkOp to do the computations.
@@ -134,8 +136,9 @@ void HardshrinkGradOp<Device, T>::OperateNoTemplate(OpKernelContext* context,
134136
upper, output->flat<T>());
135137
}
136138

139+
} // end namespace addons
137140
} // namespace tensorflow
138141

139142
#undef EIGEN_USE_THREADS
140143

141-
#endif // TENSORFLOW_ADDONS_HARDSHRINK_OP_H_
144+
#endif // TENSORFLOW_ADDONS_ACTIVATIONS_KERNELS_HARDSHRINK_OP_H_

tensorflow_addons/custom_ops/activations/cc/kernels/hardshrink_op_gpu.cu.cc

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ limitations under the License.
2222
#include "third_party/eigen3/Eigen/Core"
2323

2424
namespace tensorflow {
25+
namespace addons {
2526

2627
using GPUDevice = Eigen::GpuDevice;
2728

@@ -31,6 +32,7 @@ using GPUDevice = Eigen::GpuDevice;
3132

3233
TF_CALL_GPU_NUMBER_TYPES(DEFINE_GPU_KERNELS);
3334

35+
} // end namespace addons
3436
} // namespace tensorflow
3537

3638
#endif // GOOGLE_CUDA

tensorflow_addons/custom_ops/activations/cc/ops/gelu_op.cc

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,20 +18,22 @@ limitations under the License.
1818
#include "tensorflow/core/framework/shape_inference.h"
1919

2020
namespace tensorflow {
21+
namespace addons {
2122

22-
REGISTER_OP("Gelu")
23+
REGISTER_OP("Addons>Gelu")
2324
.Input("features: T")
2425
.Output("activations: T")
2526
.Attr("T: {half, float, double}")
2627
.Attr("approximate: bool = true")
2728
.SetShapeFn(shape_inference::UnchangedShape);
2829

29-
REGISTER_OP("GeluGrad")
30+
REGISTER_OP("Addons>GeluGrad")
3031
.Input("gradients: T")
3132
.Input("features: T")
3233
.Output("backprops: T")
3334
.Attr("T: {half, float, double}")
3435
.Attr("approximate: bool = true")
3536
.SetShapeFn(shape_inference::MergeBothInputsShapeFn);
3637

37-
} // namespace tensorflow
38+
} // end namespace addons
39+
} // namespace tensorflow

tensorflow_addons/custom_ops/activations/cc/ops/hardshrink_op.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,15 +19,15 @@ limitations under the License.
1919

2020
namespace tensorflow {
2121

22-
REGISTER_OP("Hardshrink")
22+
REGISTER_OP("Addons>Hardshrink")
2323
.Input("features: T")
2424
.Output("activations: T")
2525
.Attr("T: {half, float, double}")
2626
.Attr("lower: float = -1.0")
2727
.Attr("upper: float = 1.0")
2828
.SetShapeFn(shape_inference::UnchangedShape);
2929

30-
REGISTER_OP("HardshrinkGrad")
30+
REGISTER_OP("Addons>HardshrinkGrad")
3131
.Input("gradients: T")
3232
.Input("features: T")
3333
.Output("backprops: T")

0 commit comments

Comments (0)