Commit bc79099

Remove internal dependencies for cadence ops.
Differential Revision: D88964340
Pull Request resolved: #16207
1 parent b3c44c9 commit bc79099

5 files changed: +33 / -37 lines


backends/cadence/hifi/operators/op_quantized_conv2d_nchw_out.cpp

Lines changed: 1 addition & 1 deletion
@@ -6,10 +6,10 @@
  * LICENSE file in the root directory of this source tree.
  */

+#include <executorch/backends/cadence/generic/operators/op_quantized_conv2d.h>
 #include <executorch/backends/cadence/hifi/kernels/kernels.h>
 #include <executorch/backends/cadence/hifi/operators/operators.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
-#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_conv2d.h>

 #define ALIGN_PTR(x, bytes) ((((unsigned)(x)) + (bytes - 1)) & (~(bytes - 1)))

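Aside: the ALIGN_PTR macro kept in the context lines above is the standard align-up bit trick. For a power-of-two bytes, adding bytes - 1 and then masking with ~(bytes - 1) rounds an address up to the next multiple of bytes. A minimal standalone sketch of the same idea (align_up is an illustrative helper, not part of the backend):

#include <cassert>
#include <cstdint>

// Round x up to the next multiple of bytes (bytes must be a power of two).
// Same bit trick as ALIGN_PTR: add (bytes - 1), then clear the low bits.
static inline std::uintptr_t align_up(std::uintptr_t x, std::uintptr_t bytes) {
  return (x + (bytes - 1)) & ~(bytes - 1);
}

int main() {
  assert(align_up(13, 8) == 16); // 13 rounds up to the next multiple of 8
  assert(align_up(16, 8) == 16); // already-aligned values are unchanged
  assert(align_up(1, 64) == 64); // e.g. cache-line alignment
  return 0;
}

Note that the original macro casts through unsigned, which would truncate 64-bit pointers; that is presumably acceptable here only because the Xtensa HiFi target uses 32-bit pointers.
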
backends/cadence/hifi/operators/op_quantized_conv2d_nhwc_out.cpp

Lines changed: 1 addition & 1 deletion
@@ -6,10 +6,10 @@
  * LICENSE file in the root directory of this source tree.
  */

+#include <executorch/backends/cadence/generic/operators/op_quantized_conv2d.h>
 #include <executorch/backends/cadence/hifi/kernels/kernels.h>
 #include <executorch/backends/cadence/hifi/operators/operators.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
-#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_conv2d.h>

 #define ALIGN_PTR(x, bytes) ((((unsigned)(x)) + (bytes - 1)) & (~(bytes - 1)))

backends/cadence/hifi/operators/op_quantized_linear_out.cpp

Lines changed: 17 additions & 18 deletions
@@ -6,19 +6,20 @@
  * LICENSE file in the root directory of this source tree.
  */

-#include <executorch/backends/cadence/hifi/kernels/kernels.h>
 #include <executorch/backends/cadence/hifi/operators/operators.h>
-#include <executorch/runtime/kernel/kernel_includes.h>
-#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_linear.h>
-#include <xa_nnlib_kernels_api.h>
-#include <xtensa/tie/xt_datacache.h>
+
 #include <algorithm>
 #include <cmath>
 #include <optional>

-namespace impl {
-namespace HiFi {
-namespace native {
+#include <xa_nnlib_api.h>
+#include <xtensa/tie/xt_datacache.h>
+
+#include <executorch/backends/cadence/generic/operators/op_quantized_linear.h>
+#include <executorch/backends/cadence/hifi/kernels/kernels.h>
+#include <executorch/runtime/kernel/kernel_includes.h>
+
+namespace impl::HiFi::native {

 using ::executorch::aten::ScalarType;
 using ::executorch::aten::Tensor;
@@ -265,16 +266,16 @@ void quantized_linear_out(
 }

 void quantized_linear_per_tensor_out(
-    __ET_UNUSED KernelRuntimeContext& ctx,
+    KernelRuntimeContext& ctx,
     const Tensor& in,
     const Tensor& weight,
     const Tensor& bias,
-    int64_t in_zero_point,
-    int64_t weight_zero_point,
-    int64_t out_multiplier,
-    int64_t out_shift,
-    int64_t out_zero_point,
-    __ET_UNUSED const optional<Tensor>& offset,
+    const int64_t in_zero_point,
+    const int64_t weight_zero_point,
+    const int64_t out_multiplier,
+    const int64_t out_shift,
+    const int64_t out_zero_point,
+    const optional<Tensor>& offset,
     Tensor& out) {
   if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
       in.scalar_type() == ::executorch::aten::ScalarType::Short &&
@@ -321,6 +322,4 @@ void quantized_linear_per_tensor_out(
 }
 }

-} // namespace native
-} // namespace HiFi
-} // namespace impl
+} // namespace impl::HiFi::native

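Aside: the per-tensor variant above takes one zero point per operand and a single out_multiplier/out_shift pair for the whole tensor. Kernels of this shape typically map the wide accumulator back to the quantized output with a fixed-point multiply, a shift, and a zero-point offset. A minimal sketch of that requantization step, assuming out_multiplier/out_shift encode the combined scale in_scale * weight_scale / out_scale as a multiply-then-right-shift (requantize, kOutMin, and kOutMax are illustrative names, and the HiFi kernels' exact rounding may differ):

#include <algorithm>
#include <cstdint>

// Hypothetical per-tensor requantization of a 32-bit accumulator to int8.
// acc is the zero-point-corrected dot product; out_multiplier and out_shift
// encode the combined scale in fixed point.
inline int8_t requantize(
    int32_t acc,
    int64_t out_multiplier,
    int64_t out_shift,
    int64_t out_zero_point) {
  const int64_t scaled =
      (static_cast<int64_t>(acc) * out_multiplier) >> out_shift;
  const int64_t kOutMin = -128; // int8 lower bound
  const int64_t kOutMax = 127;  // int8 upper bound
  return static_cast<int8_t>(
      std::clamp(scaled + out_zero_point, kOutMin, kOutMax));
}

Marking the scalar parameters const, as this commit does, changes nothing for callers (they are passed by value); it simply documents that the kernel never reassigns them.
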
backends/cadence/hifi/operators/op_quantized_matmul_out.cpp

Lines changed: 9 additions & 12 deletions
@@ -6,19 +6,18 @@
  * LICENSE file in the root directory of this source tree.
  */

+#include <stdlib.h>
+
+#include <executorch/backends/cadence/generic/operators/op_quantized_matmul.h>
 #include <executorch/backends/cadence/hifi/kernels/kernels.h>
 #include <executorch/runtime/kernel/kernel_includes.h>
-#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_matmul.h>
-#include <stdlib.h>

-using executorch::aten::ScalarType;
-using executorch::aten::Tensor;
-using executorch::runtime::getLeadingDims;
-using torch::executor::RuntimeContext;
+namespace impl::HiFi::native {

-namespace impl {
-namespace HiFi {
-namespace native {
+using ::executorch::aten::ScalarType;
+using ::executorch::aten::Tensor;
+using ::executorch::runtime::getLeadingDims;
+using ::torch::executor::RuntimeContext;

 // The quantized matmul. The quantized matmul accumulates in a wider register,
 // whose type is TA.
@@ -241,6 +240,4 @@ void quantized_matmul_out(
 }
 }

-} // namespace native
-} // namespace HiFi
-} // namespace impl
+} // namespace impl::HiFi::native

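Aside: the comment retained in the first hunk ("accumulates in a wider register, whose type is TA") names the central idea of a quantized matmul: multiply narrow integer operands, but sum into a wider accumulator so the dot product cannot overflow. A minimal reference sketch of the pattern (quantized_matmul_ref and its signature are illustrative, not the backend's actual kernel):

#include <cstdint>

// Hypothetical reference quantized matmul: T is the narrow operand type
// (e.g. int8_t) and TA the wide accumulator type (e.g. int32_t).
// Requantization of acc back to T is omitted here.
template <typename T, typename TA>
void quantized_matmul_ref(
    const T* x, const T* y, TA* out,
    int m, int k, int n,
    TA x_zero_point, TA y_zero_point) {
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      TA acc = 0; // the "wider register" the comment refers to
      for (int p = 0; p < k; ++p) {
        acc += (static_cast<TA>(x[i * k + p]) - x_zero_point) *
               (static_cast<TA>(y[p * n + j]) - y_zero_point);
      }
      out[i * n + j] = acc;
    }
  }
}

With T = int8_t and TA = int32_t, each zero-point-corrected product fits comfortably in 17 bits, so tens of thousands of terms can accumulate before int32 overflow becomes a concern.
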
backends/cadence/hifi/operators/targets.bzl

Lines changed: 5 additions & 5 deletions
@@ -122,12 +122,12 @@ def define_common_targets():
         define_operator(op)

     # quantized_linear_out and quantized_linear_per_tensor_out needs additional dependency for int16 support
-    define_operator("quantized_linear_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_linear"])
-    define_operator("quantized_linear_per_tensor_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_linear"])
+    define_operator("quantized_linear_out", deps=["//executorch/backends/cadence/generic/operators:op_quantized_linear"])
+    define_operator("quantized_linear_per_tensor_out", deps=["//executorch/backends/cadence/generic/operators:op_quantized_linear"])

     # quantized_conv2d_nchw_out and quantized_conv2d_nhwc_out need additional dependency for int16 support
-    define_operator("quantized_conv2d_nchw_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_conv2d"])
-    define_operator("quantized_conv2d_nhwc_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_conv2d"])
+    define_operator("quantized_conv2d_nchw_out", deps=["//executorch/backends/cadence/generic/operators:op_quantized_conv2d"])
+    define_operator("quantized_conv2d_nhwc_out", deps=["//executorch/backends/cadence/generic/operators:op_quantized_conv2d"])

     # quantized_matmul_out needs additional dependency for int16 support
-    define_operator("quantized_matmul_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_matmul"], exported_headers=["op_quantized_matmul_out.h"])
+    define_operator("quantized_matmul_out", deps=["//executorch/backends/cadence/generic/operators:op_quantized_matmul"], exported_headers=["op_quantized_matmul_out.h"])
