ONE - On-device Neural Engine
PALBinaryOpCommon.h
/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LUCI_INTERPRETER_PAL_BINARYOPCOMMON_H
#define LUCI_INTERPRETER_PAL_BINARYOPCOMMON_H

#include "Params.h"
#include "PALUtils.h"

namespace luci_interpreter_pal
{

template <typename T, std::enable_if_t<std::is_floating_point<T>::value, bool> = true>
struct FloorDivFn
{
  T operator()(T lhs, T rhs)
  {
    return std::floor(static_cast<double>(lhs) / static_cast<double>(rhs));
  }
};
template <typename T, std::enable_if_t<std::is_floating_point<T>::value, bool> = true>
struct FloorModFn
{
  T operator()(T lhs, T rhs)
  {
    T trunc_mod = std::fmod(lhs, rhs);
    return (trunc_mod != 0) && ((rhs < 0) != (trunc_mod < 0)) ? (trunc_mod + rhs) : trunc_mod;
  }
};
template <typename T> struct MaximumFn
{
  T operator()(T lhs, T rhs) { return std::max(lhs, rhs); }
};
template <typename T> struct MinimumFn
{
  T operator()(T lhs, T rhs) { return std::min(lhs, rhs); }
};

// TODO: check if there is a real activation value
template <typename T, typename Fn>
inline void BinaryOp(const int flat_size, const T *input1_data, const T *input2_data,
                     T *output_data)
{
  Fn func;
  for (int i = 0; i < flat_size; ++i)
  {
    output_data[i] = func(input1_data[i], input2_data[i]);
  }
}

template <typename T, typename Fn>
inline void BroadcastBinaryOp4DSlow(const luci_interpreter::RuntimeShape &input1_shape,
                                    const float *input1_data,
                                    const luci_interpreter::RuntimeShape &input2_shape,
                                    const float *input2_data,
                                    const luci_interpreter::RuntimeShape &output_shape,
                                    float *output_data)
{
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, &desc2);

  const luci_interpreter::RuntimeShape extended_output_shape =
    luci_interpreter::RuntimeShape::extendedShape(4, output_shape);

  // In Tensorflow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest stride,
  // typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has smallest stride.
  //
  // We name our variables by their Tensorflow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for the
  // best cache behavior.

  Fn func;
  for (int b = 0; b < extended_output_shape.dims(0); ++b)
  {
    for (int y = 0; y < extended_output_shape.dims(1); ++y)
    {
      for (int x = 0; x < extended_output_shape.dims(2); ++x)
      {
        for (int c = 0; c < extended_output_shape.dims(3); ++c)
        {
          const int output_data_offset =
            ((b * extended_output_shape.dims(1) + y) * extended_output_shape.dims(2) + x) *
              extended_output_shape.dims(3) +
            c;

          output_data[output_data_offset] = func(input1_data[subscriptToIndex(desc1, b, y, x, c)],
                                                 input2_data[subscriptToIndex(desc2, b, y, x, c)]);
        }
      }
    }
  }
}

} // namespace luci_interpreter_pal

#endif // LUCI_INTERPRETER_PAL_BINARYOPCOMMON_H
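For orientation, the sketch below shows how the element-wise helpers above might be invoked directly. It is illustrative only: it assumes this header and its Params.h / PALUtils.h dependencies are on the include path, and the buffers and values are made up for the example rather than taken from the PAL.

// Minimal usage sketch for the element-wise path (illustrative, not PAL code).
#include "PALBinaryOpCommon.h"

#include <array>
#include <cstdio>

int main()
{
  // Two flat buffers of equal length; BinaryOp applies the functor per element.
  std::array<float, 4> lhs = {1.0f, -7.0f, 3.5f, 2.0f};
  std::array<float, 4> rhs = {2.0f, 3.0f, -3.5f, 2.0f};
  std::array<float, 4> out{};
  std::array<float, 4> mod{};

  // Element-wise maximum: out[i] = std::max(lhs[i], rhs[i]).
  luci_interpreter_pal::BinaryOp<float, luci_interpreter_pal::MaximumFn<float>>(
    static_cast<int>(out.size()), lhs.data(), rhs.data(), out.data());

  // Floored modulo: the result's sign follows the divisor (Python-style), so
  // floor_mod(-7, 3) == 2 even though std::fmod(-7, 3) == -1.
  luci_interpreter_pal::BinaryOp<float, luci_interpreter_pal::FloorModFn<float>>(
    static_cast<int>(mod.size()), lhs.data(), rhs.data(), mod.data());

  for (int i = 0; i < 4; ++i)
    std::printf("max=%g floor_mod=%g\n", out[i], mod[i]);
  return 0;
}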
Cross-referenced declarations (defined elsewhere):

int32_t luci_interpreter::RuntimeShape::dims(int i) const (Tensor.h:108)
static RuntimeShape luci_interpreter::RuntimeShape::extendedShape(int new_shape_size, const RuntimeShape &shape) (Tensor.h:95)
int subscriptToIndex(const NdArrayDesc< 4 > &desc, int i0, int i1, int i2, int i3)
void NdArrayDescsForElementwiseBroadcast(const luci_interpreter::RuntimeShape &input0_shape, const luci_interpreter::RuntimeShape &input1_shape, NdArrayDesc< N > *desc0_out, NdArrayDesc< N > *desc1_out)
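The broadcast path relies on NdArrayDescsForElementwiseBroadcast and subscriptToIndex, declared elsewhere in the PAL. The standalone sketch below illustrates the usual descriptor-and-stride scheme behind such helpers: a dimension being broadcast is given stride 0, so every output subscript maps back to the input's single element along that axis. Desc4, makeBroadcastDesc and toIndex are hypothetical names local to this example, not the PAL declarations.

// Standalone sketch of 4-D broadcast indexing in the spirit of BroadcastBinaryOp4DSlow.
#include <algorithm>
#include <array>
#include <cstdio>
#include <vector>

struct Desc4
{
  std::array<int, 4> extents;
  std::array<int, 4> strides; // a stride of 0 pins a broadcast dimension to index 0
};

// Build a descriptor whose strides walk the input buffer, using stride 0 where
// the input extent is 1 but the output extent is larger (broadcasting).
Desc4 makeBroadcastDesc(const std::array<int, 4> &in, const std::array<int, 4> &out)
{
  Desc4 d{};
  int stride = 1;
  for (int i = 3; i >= 0; --i)
  {
    d.strides[i] = (in[i] == 1 && out[i] != 1) ? 0 : stride;
    d.extents[i] = out[i];
    stride *= in[i];
  }
  return d;
}

int toIndex(const Desc4 &d, int b, int y, int x, int c)
{
  return b * d.strides[0] + y * d.strides[1] + x * d.strides[2] + c * d.strides[3];
}

int main()
{
  // input1: shape [2, 1, 2, 3]; input2: shape [1, 1, 1, 3] (broadcast over b, y, x).
  const std::array<int, 4> shape1 = {2, 1, 2, 3}, shape2 = {1, 1, 1, 3};
  const std::array<int, 4> out_shape = {2, 1, 2, 3};
  std::vector<float> in1(12), in2 = {10.f, 20.f, 30.f}, out(12);
  for (int i = 0; i < 12; ++i)
    in1[i] = static_cast<float>(i);

  const Desc4 d1 = makeBroadcastDesc(shape1, out_shape);
  const Desc4 d2 = makeBroadcastDesc(shape2, out_shape);

  // Same loop nest as the PAL routine: the innermost dimension has the smallest stride.
  for (int b = 0; b < out_shape[0]; ++b)
    for (int y = 0; y < out_shape[1]; ++y)
      for (int x = 0; x < out_shape[2]; ++x)
        for (int c = 0; c < out_shape[3]; ++c)
        {
          const int off = ((b * out_shape[1] + y) * out_shape[2] + x) * out_shape[3] + c;
          out[off] = std::max(in1[toIndex(d1, b, y, x, c)], in2[toIndex(d2, b, y, x, c)]);
        }

  for (float v : out)
    std::printf("%g ", v);
  std::printf("\n");
  return 0;
}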