ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Sub.cpp
Go to the documentation of this file.
/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include "OMStatus.h"

#include "core/OMUtils.h"
#include "core/OMRuntimeShape.h"

#include "execute/OMUtils.h"
#include "execute/OMRuntimeKernel.h"

#include "PALSub.h"

#include <cassert>
#include <cstdint>
28using namespace onert_micro;
29using namespace onert_micro::execute;
30
namespace
{

// SUB is a binary elementwise op: exactly two inputs, one output.
constexpr uint32_t numInput = 2;
constexpr uint32_t numOutput = 1;

// Positions of the operands within the kernel's input/output tensor lists.
constexpr uint32_t input1TensorIdx = 0;
constexpr uint32_t input2TensorIdx = 1;
constexpr uint32_t outputTensorIdx = 0;

} // namespace

// NOTE: doesn't currently support dynamic shapes
// TODO: reduce code duplication with Add, Mul
45namespace onert_micro
46{
47namespace execute
48{
49
51{
52 core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
53 core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
54 uint16_t op_index = execute_args.kernel_index;
55
56 const circle::Tensor *input1;
57 const circle::Tensor *input2;
58 const circle::Tensor *output;
59
60 uint8_t *input1_data;
61 uint8_t *input2_data;
62 uint8_t *output_data;
63
64 const circle::SubOptions *options;
65 // Read kernel
66 {
67 execute::OMRuntimeKernel runtime_kernel;
68 runtime_kernel.readKernel(op_index, runtime_context);
69
70 input1 = runtime_kernel.inputs[input1TensorIdx];
71 input2 = runtime_kernel.inputs[input2TensorIdx];
72 output = runtime_kernel.outputs[outputTensorIdx];
73 assert(input1 != nullptr);
74 assert(input2 != nullptr);
75 assert(output != nullptr);
76
77 runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
78
79 input1_data = runtime_kernel.inputs_data[input1TensorIdx];
80 input2_data = runtime_kernel.inputs_data[input2TensorIdx];
81 output_data = runtime_kernel.outputs_data[outputTensorIdx];
82 assert(input1_data != nullptr);
83 assert(input2_data != nullptr);
84 assert(output_data != nullptr);
85
86 options = runtime_kernel.first_operator->builtin_options_as_SubOptions();
87 }
88
89 OMStatus status;
90
91 core::OMRuntimeShape input1_shape(input1);
92 core::OMRuntimeShape input2_shape(input2);
94
96 const bool need_broadcast = pal::processBroadcastShapes(input1_shape, input2_shape, &params);
97
98 switch (input1->type())
99 {
100#ifndef DIS_FLOAT
101 case circle::TensorType_FLOAT32:
102 {
103 status = execute::calculateActivationRange(options->fused_activation_function(),
104 &params.float_activation_min,
105 &params.float_activation_max);
106
107 if (need_broadcast)
108 {
110 params, input1_shape, core::utils::castInputData<float>(input1_data), input2_shape,
111 core::utils::castInputData<float>(input2_data), output_shape,
112 core::utils::castOutputData<float>(output_data));
113 }
114 else
115 {
116 status =
117 pal::Sub(params, input1_shape.flatSize(), core::utils::castInputData<float>(input1_data),
118 core::utils::castInputData<float>(input2_data),
119 core::utils::castOutputData<float>(output_data));
120 }
121 }
122 break;
123 case circle::TensorType_INT64:
124 {
125 status = execute::calculateActivationRange(options->fused_activation_function(),
126 &params.int64_activation_min,
127 &params.int64_activation_max);
128
129 if (need_broadcast)
130 {
132 params, input1_shape, core::utils::castInputData<int64_t>(input1_data), input2_shape,
133 core::utils::castInputData<int64_t>(input2_data), output_shape,
134 core::utils::castOutputData<int64_t>(output_data));
135 }
136 else
137 {
138 status = pal::Sub(params, input1_shape.flatSize(),
139 core::utils::castInputData<int64_t>(input1_data),
140 core::utils::castInputData<int64_t>(input2_data),
141 core::utils::castOutputData<int64_t>(output_data));
142 }
143 }
144 break;
145 case circle::TensorType_INT32:
146 {
147 status = execute::calculateActivationRange(options->fused_activation_function(),
148 &params.int32_activation_min,
149 &params.int32_activation_max);
150
151 if (need_broadcast)
152 {
154 params, input1_shape, core::utils::castInputData<int32_t>(input1_data), input2_shape,
155 core::utils::castInputData<int32_t>(input2_data), output_shape,
156 core::utils::castOutputData<int32_t>(output_data));
157 }
158 else
159 {
160 status = pal::Sub(params, input1_shape.flatSize(),
161 core::utils::castInputData<int32_t>(input1_data),
162 core::utils::castInputData<int32_t>(input2_data),
163 core::utils::castOutputData<int32_t>(output_data));
164 }
165 }
166 break;
167#endif // DIS_FLOAT
168#ifndef DIS_QUANT
169 case circle::TensorType_INT8:
170 {
171 core::ArithmeticQuantParams sub_params{};
172
173 calculateQuantParams(sub_params, input1, input2, output,
174 options->fused_activation_function());
175
176 if (need_broadcast)
177 {
179 sub_params, input1_shape, core::utils::castInputData<int8_t>(input1_data), input2_shape,
180 core::utils::castInputData<int8_t>(input2_data), output_shape,
181 core::utils::castOutputData<int8_t>(output_data));
182 }
183 else
184 {
185 status = pal::Sub(sub_params, input1_shape.flatSize(),
186 core::utils::castInputData<int8_t>(input1_data),
187 core::utils::castInputData<int8_t>(input2_data),
188 core::utils::castOutputData<int8_t>(output_data));
189 }
190 }
191 break;
192#endif // DIF_QUANT
193 default:
194 {
195 status = UnsupportedType;
196 assert(false && "Unsupported type.");
197 }
198 }
199
200 return status;
201}
202
203} // namespace execute
204} // namespace onert_micro
uint8_t * outputs_data[maxOutputSize]
const circle::Operator * first_operator
OMStatus getDataFromStorage(uint16_t op_index, core::OMRuntimeStorage &storage, core::OMRuntimeContext &context)
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
const luci_interpreter::RuntimeShape output_shape
constexpr uint32_t input1TensorIdx
constexpr uint32_t outputTensorIdx
constexpr uint32_t input2TensorIdx
OMStatus Sub(const core::BinaryArithmeticBroadcastParams &params, const int flat_size, const T *input1_data, const T *input2_data, T *output_data)
bool processBroadcastShapes(const core::OMRuntimeShape &shape0, const core::OMRuntimeShape &shape1, core::BinaryArithmeticBroadcastParams *params)
OMStatus BroadcastSub4DSlow(const core::BinaryArithmeticBroadcastParams &params, const core::OMRuntimeShape &input1_shape, const T *input1_data, const core::OMRuntimeShape &input2_shape, const T *input2_data, const core::OMRuntimeShape &output_shape, T *output_data)
void calculateQuantParams(core::ArithmeticQuantParams &params, const circle::Tensor *input1, const circle::Tensor *input2, const circle::Tensor *output, circle::ActivationFunctionType act)
Definition OMUtils.cpp:194
OMStatus calculateActivationRange(circle::ActivationFunctionType activation, T *activation_min, T *activation_max)
Definition OMUtils.h:36
OMStatus execute_kernel_CircleSub(const OMExecuteArgs &execute_args)
Definition Sub.cpp:50
@ UnsupportedType
Definition OMStatus.h:26
core::OMRuntimeContext & runtime_context
core::OMRuntimeStorage & runtime_storage