ONE - On-device Neural Engine
Convert.cc
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Convert.h"

#include "Swizzle.h"
#include "ir/DataType.h"
#include "ir/operation/ElementwiseActivation.h"
#include <memory>

namespace onert
{
namespace backend
{
namespace acl_common
{

::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, bool apply_dim_correction)
{
  // If shape's rank is 0, the tensor is a scalar.
  // Some ACL kernels can use a scalar as a tensor, but ACL does not allocate a buffer for a
  // tensor whose rank is 0.
  const auto tensor_shape = shape.rank() == 0 ? ir::Shape{1} : shape;

  const uint32_t rank = tensor_shape.rank();

  ::arm_compute::TensorShape res{};

  res.set_num_dimensions(rank);

  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    // NOTE In some cases, the dimension correction must be suppressed.
    // For example, input_size is 1 in LSTM. The input-to-input weights
    // ([num_units, input_size]) of LSTM are reused as the weight of FullyConnected, and a
    // FullyConnected weight must have at least 2 dimensions. If the dimension correction were
    // applied to input_to_input_weights with input_size equal to 1, they would collapse to 1-D
    // and could no longer be used as the FullyConnected weight.
    res.set(ToARMComputeAxis(rank, axis).value(), tensor_shape.dim(axis), apply_dim_correction);
  }

  return res;
}
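
// Usage sketch (an illustrative addition, not part of the original file). It assumes
// ToARMComputeAxis simply reverses the axis order, so ACL dimension 0 is the innermost
// axis; an NHWC shape {N=1, H=3, W=4, C=2} then reads C, W, H, N from dimension 0 upward:
//
//   const auto acl_shape = asTensorShape(ir::Shape{1, 3, 4, 2}, true);
//   assert(acl_shape[0] == 2); // C
//   assert(acl_shape[1] == 4); // W
//   assert(acl_shape[2] == 3); // H
//   assert(acl_shape[3] == 1); // N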

::arm_compute::Coordinates asTensorCoordinate(const ir::Coordinates &coord)
{
  const uint32_t rank = coord.size();

  ::arm_compute::Coordinates res{};

  res.set_num_dimensions(rank);

  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    res.set(ToARMComputeAxis(rank, axis).value(), coord[axis]);
  }

  return res;
}

::arm_compute::DataType asDataType(const ir::DataType type)
{
  switch (type)
  {
    case ir::DataType::FLOAT32:
      return ::arm_compute::DataType::F32;
    case ir::DataType::INT32:
      return ::arm_compute::DataType::S32;
    case ir::DataType::UINT32:
      return ::arm_compute::DataType::U32;
    case ir::DataType::QUANT_UINT8_ASYMM:
      return ::arm_compute::DataType::QASYMM8;
    case ir::DataType::BOOL8:
    case ir::DataType::UINT8:
      return ::arm_compute::DataType::U8;
    case ir::DataType::QUANT_INT8_SYMM:
      return ::arm_compute::DataType::QSYMM8;
    case ir::DataType::QUANT_INT8_ASYMM:
      return ::arm_compute::DataType::QASYMM8_SIGNED;
    case ir::DataType::FLOAT16:
      return ::arm_compute::DataType::F16;
    case ir::DataType::INT64:
      return ::arm_compute::DataType::S64;
    case ir::DataType::QUANT_INT16_ASYMM:
      return ::arm_compute::DataType::QASYMM16;
    case ir::DataType::QUANT_INT8_SYMM_PER_CHANNEL:
      return ::arm_compute::DataType::QSYMM8_PER_CHANNEL;
    default:
      throw std::runtime_error("Not supported internal data type, yet");
      break;
  }
}

::arm_compute::QuantizationInfo asQuantizationInfo(const float scale, const int32_t offset)
{
  return ::arm_compute::QuantizationInfo(scale, offset);
}
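
// NOTE QuantizationInfo encodes the usual affine quantization scheme:
//   real_value = scale * (quantized_value - zero_point)
// For example, with scale = 0.5f and offset = 128, the QASYMM8 value 130
// represents 0.5f * (130 - 128) = 1.0f.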

::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
                                       bool apply_dim_correction)
{
  ::arm_compute::TensorInfo info(asTensorShape(shape, apply_dim_correction), 1,
                                 asDataType(typeInfo.type()),
                                 asQuantizationInfo(typeInfo.scale(), typeInfo.zero_point()));
  info.set_data_layout(::arm_compute::DataLayout::NHWC);
  return info;
}
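
// Usage sketch (an illustrative addition, not part of the original file; the ir::TypeInfo
// constructor arguments below are an assumption): building the ACL tensor info for a
// QASYMM8 tensor with scale 0.5 and zero point 128, laid out as NHWC:
//
//   const ir::TypeInfo type{ir::DataType::QUANT_UINT8_ASYMM, 0.5f, 128};
//   const auto info = asTensorInfo(ir::Shape{1, 3, 4, 2}, type, true);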

::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
                                             const ir::Stride &stride)
{
  return ::arm_compute::PadStrideInfo{stride.horizontal,
                                      stride.vertical,
                                      padding.left,
                                      padding.right,
                                      padding.top,
                                      padding.bottom,
                                      ::arm_compute::DimensionRoundingType::FLOOR};
}
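
// NOTE With DimensionRoundingType::FLOOR, ACL computes each spatial output size as
//   out = floor((in + pad_before + pad_after - kernel) / stride) + 1
// e.g. in = 7, kernel = 3, stride = 2, no padding gives floor(4 / 2) + 1 = 3.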

::arm_compute::ActivationLayerInfo asActivationLayerInfo(const ir::Activation act_code)
{
  switch (act_code)
  {
    case ir::Activation::NONE:
      return ::arm_compute::ActivationLayerInfo{};
    case ir::Activation::RELU:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
    case ir::Activation::RELU1:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
    case ir::Activation::RELU6:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
    // Cases for activation of LSTM.
    case ir::Activation::TANH:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
    case ir::Activation::SIGMOID:
      // NOTE The sigmoid function is a special case of the logistic function when L=1, k=1, x0=0.
      // TODO In the ACL and NNAPI specs, Logistic currently always has L=1, k=1, x0=0
      //      (i.e. it is always sigmoid) regardless of the parameter values.
      //      If ACL ever supports a non-sigmoid logistic, fix these parameter values.
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC, 0.0f, 0.0f};
    default:
      throw std::runtime_error{"Not supported internal activation, yet"};
      break;
  }
}
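
// NOTE In ACL, BOUNDED_RELU is f(x) = min(a, max(0, x)) and LU_BOUNDED_RELU is
// f(x) = min(a, max(b, x)), so RELU6 above is LU_BOUNDED_RELU with a = 6, b = 0,
// and RELU1 is LU_BOUNDED_RELU with a = 1, b = -1.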

::arm_compute::ActivationLayerInfo
asActivationLayerInfo(const ir::operation::ElementwiseActivation::Type op_type, float alpha,
                      float beta)
{
  switch (op_type)
  {
    case ir::operation::ElementwiseActivation::Type::RELU:
      if (beta == 0.f)
      {
        if (alpha == ir::operation::ElementwiseActivation::infinity)
        {
          return ::arm_compute::ActivationLayerInfo{
            ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
        }
        else
        {
          return ::arm_compute::ActivationLayerInfo{
            ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, alpha};
        }
      }
      else
      {
        return ::arm_compute::ActivationLayerInfo{
          ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, alpha, beta};
      }
    case ir::operation::ElementwiseActivation::Type::TANH:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, alpha, beta};
    case ir::operation::ElementwiseActivation::Type::LOGISTIC:
      // NOTE The sigmoid function is a special case of the logistic function when L=1, k=1, x0=0.
      // TODO In the ACL and NNAPI specs, Logistic currently always has L=1, k=1, x0=0
      //      (i.e. it is always sigmoid) regardless of the parameter values.
      //      If ACL ever supports a non-sigmoid logistic, fix these parameter values.
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC};
    case ir::operation::ElementwiseActivation::Type::LEAKY_RELU:
      return ::arm_compute::ActivationLayerInfo{
        ::arm_compute::ActivationLayerInfo::ActivationFunction::LEAKY_RELU, alpha};
    default:
      throw std::runtime_error{"Not supported internal elementwise activation, yet"};
      break;
  }
}

arm_compute::Coordinates asCoordinates(const ir::Operand &operand, int32_t rank)
{
  std::set<uint32_t> axes = asSet(operand, rank);

  arm_compute::Coordinates reduce_axes;
  for (const int32_t axis : axes)
  {
    reduce_axes.set(reduce_axes.num_dimensions(), axis);
  }

  return reduce_axes;
}

std::set<uint32_t> asSet(const ir::Operand &operand, int32_t rank)
{
  std::set<std::uint32_t> axes;

  for (size_t i = 0; i < operand.shape().num_elements(); ++i)
  {
    int32_t axis = 0;
    switch (operand.typeInfo().type())
    {
      case ir::DataType::INT32:
        axis = reinterpret_cast<const int32_t *>(operand.data()->base())[i];
        break;
      case ir::DataType::INT64:
        axis = reinterpret_cast<const int64_t *>(operand.data()->base())[i];
        break;
      default:
        throw std::runtime_error("acl_common::asSet: Not supported data type");
    }
    if (axis < 0)
      axis += rank;
    axes.insert(ToARMComputeAxis(rank, axis).value());
  }

  return axes;
}
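
// Worked example (an illustrative addition, not part of the original file), assuming
// ToARMComputeAxis reverses the axis order: for rank = 4 and an INT32 axis operand
// holding {-1, 0}, -1 is first normalized to 3; then ToARMComputeAxis(4, 3) = 0 and
// ToARMComputeAxis(4, 0) = 3, so the resulting set is {0, 3}.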

std::unique_ptr<AclFunction> asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer)
{
  return std::make_unique<AclFunction>(std::move(layer));
}

ir::DataType asRuntimeDataType(::arm_compute::DataType data_type)
{
  switch (data_type)
  {
    case ::arm_compute::DataType::F32:
      return ir::DataType::FLOAT32;
    case ::arm_compute::DataType::S32:
      return ir::DataType::INT32;
    case ::arm_compute::DataType::U32:
      return ir::DataType::UINT32;
    case ::arm_compute::DataType::QASYMM8:
      return ir::DataType::QUANT_UINT8_ASYMM;
    case ::arm_compute::DataType::QASYMM8_SIGNED:
      return ir::DataType::QUANT_INT8_ASYMM;
    case ::arm_compute::DataType::U8:
      return ir::DataType::UINT8;
    case ::arm_compute::DataType::QSYMM8:
      return ir::DataType::QUANT_INT8_SYMM;
    case ::arm_compute::DataType::F16:
      return ir::DataType::FLOAT16;
    case ::arm_compute::DataType::S64:
      return ir::DataType::INT64;
    default:
      throw std::runtime_error{"Not supported acl data type, yet"};
      break;
  }
}
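
// NOTE asDataType and asRuntimeDataType are not exact inverses: asDataType maps both
// BOOL8 and UINT8 to U8, so a BOOL8 operand round-trips back as UINT8.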

arm_compute::PoolingType convertPoolType(ir::operation::Pool2D::PoolType pool_type_ir)
{
  switch (pool_type_ir)
  {
    case ir::operation::Pool2D::PoolType::AVG:
      return arm_compute::PoolingType::AVG;
    case ir::operation::Pool2D::PoolType::L2:
      return arm_compute::PoolingType::L2;
    case ir::operation::Pool2D::PoolType::MAX:
      return arm_compute::PoolingType::MAX;
    default:
      throw std::runtime_error("convertPoolType: Not supported operation yet");
  }
}

arm_compute::ReductionOperation convertReduceType(ir::operation::Reduce::ReduceType reduce_type_ir)
{
  switch (reduce_type_ir)
  {
    case ir::operation::Reduce::ReduceType::MAX:
      return arm_compute::ReductionOperation::MAX;
    case ir::operation::Reduce::ReduceType::MIN:
      return arm_compute::ReductionOperation::MIN;
    case ir::operation::Reduce::ReduceType::SUM:
      return arm_compute::ReductionOperation::SUM;
    default:
      throw std::runtime_error("convertReduceType: Not supported operation yet");
  }
}

arm_compute::PixelValue asPixelValue(const ir::Operand &operand)
{
  assert(operand.isConstant());
  assert(operand.shape().num_elements() == 1);
  switch (operand.typeInfo().type())
  {
    case ir::DataType::INT32:
      return arm_compute::PixelValue(operand.asScalar<int32_t>());
    case ir::DataType::INT64:
      return arm_compute::PixelValue(operand.asScalar<int64_t>());
    case ir::DataType::UINT32:
      // Read with a scalar type matching the 4-byte operand data.
      return arm_compute::PixelValue(operand.asScalar<uint32_t>());
    case ir::DataType::UINT8:
      return arm_compute::PixelValue(operand.asScalar<uint8_t>());
    case ir::DataType::FLOAT32:
      return arm_compute::PixelValue(operand.asScalar<float>());
    default:
      throw std::runtime_error("asPixelValue : Not supported datatype yet");
  }
}

arm_compute::Size2D asDilation(uint32_t dilation_width, uint32_t dilation_height)
{
  assert(dilation_width != 0);
  assert(dilation_height != 0);

  return arm_compute::Size2D(dilation_width, dilation_height);
}

} // namespace acl_common
} // namespace backend
} // namespace onert