ONE - On-device Neural Engine
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
OperationUtils.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
18#define __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
19
21#include <ir/DataType.h>
22#include <ir/Operand.h>
23#include <ir/Padding.h>
24#include <ir/operation/RoPE.h>
26
27#include <cker/Shape.h>
28#include <cker/Types.h>
29
30#include <limits>
31#include <vector>
32
34using namespace onert::util;
35
37{
38
// Type-punning view over a mutable tensor buffer. All members alias the same
// address; the caller selects the field that matches the tensor's data type.
// The union does not own or manage the pointed-to memory.
39union DataPtr {
40 uint8_t *u8;
41 int8_t *i8;
42 uint32_t *u32;
43 int32_t *i32;
44 bool *b;
45 float *f;
46 int64_t *i64;
47 void *v;
48};
49
// NOTE(review): the union declaration line (ConstDataPtr) was dropped by the
// listing extraction. These members are the read-only counterparts of
// DataPtr's fields, all aliasing the same address.
51 const uint8_t *u8;
52 const int8_t *i8;
53 const uint32_t *u32;
54 const int32_t *i32;
55 const bool *b;
56 const float *f;
57 const int64_t *i64;
58 const void *v;
59};
60
// Rank (number of dimensions) of the tensor's shape (defined in Shape.cpp).
61uint32_t getNumberOfDimensions(const IPortableTensor *tensor);
62
// Total element count of the tensor — presumably the product of all
// dimension sizes; see the definition in Shape.cpp.
63uint32_t getNumberOfElements(const IPortableTensor *tensor);
64
// Size of the dimension at 'dimensionIdx' (defined in Shape.cpp).
65uint32_t getSizeOfDimension(const IPortableTensor *tensor, uint32_t dimensionIdx);
66
// Body of getExtendedTensorShape (signature line was elided by this listing;
// per the index it is: nnfw::cker::Shape getExtendedTensorShape(const IPortableTensor *)).
// Produces a rank-4 cker shape by left-padding the tensor's shape with 1s,
// e.g. (H, W) becomes (1, 1, H, W).
68{
69 assert(tensor);
70 const int32_t extended_rank = 4;
71 int32_t raw_shape[extended_rank];
72 auto shape = tensor->getShape();
// NOTE(review): 'src' is the number of leading dims to fill with 1. If
// shape.rank() > 4 this unsigned subtraction wraps around — the code assumes
// rank <= 4; TODO confirm callers guarantee that.
73 uint32_t src = extended_rank - shape.rank();
74 for (uint32_t i = 0; i < extended_rank; ++i)
75 {
76 if (i < src)
77 {
// Padding position: dimension size 1.
78 raw_shape[i] = 1;
79 }
80 else
81 {
// Copy the original dimension, shifted right by the padding amount.
82 raw_shape[i] = shape.dim(i - src);
83 }
84 }
85
86 return nnfw::cker::Shape(extended_rank, raw_shape);
87}
88
// Body of getShape (signature line was elided by this listing; per the index
// it is: nnfw::cker::Shape getShape(const IPortableTensor *)).
// Converts an onert IR shape into an nnfw::cker::Shape, dimension by
// dimension. A null tensor maps to a default-constructed (empty) shape.
90{
91 if (tensor == nullptr)
92 return nnfw::cker::Shape();
93
94 const ir::Shape &shape = tensor->get_info().shape();
95 auto rank = shape.rank();
96 nnfw::cker::Shape ret(rank);
// Write each IR dimension straight into the cker shape's backing array.
97 auto data = ret.DimsData();
98 for (int i = 0; i < rank; ++i)
99 {
100 data[i] = shape.dim(i);
101 }
102 return ret;
103}
104
// Body of convertActivationType (signature and the non-default case labels,
// original lines 109-121, were elided by this listing; per the index it maps
// ir::Activation to nnfw::cker::FusedActivationFunctionType).
// Only the fallback is visible here: any unmapped activation is rejected.
107{
108 switch (activation)
109 {
122 default:
123 throw std::runtime_error{"CPU backend: Cannot convert activation type"};
124 }
125}
126
// Resolve a possibly-negative axis index to its non-negative form.
// A negative axis counts back from the end (-1 is the last dimension of a
// rank-'rank' tensor); a non-negative axis is returned unchanged. The result
// is not range-checked against rank.
inline int32_t getAxis(uint32_t rank, int32_t axis)
{
  return (axis >= 0) ? axis : axis + static_cast<int32_t>(rank);
}
138
// Split a real multiplier into a fixed-point quantized multiplier and shift.
139void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
140
// NOTE(review): the first line of this declaration was dropped by the listing;
// per the index it is GetQuantizedConvolutionMultiplier(input, filter, bias,
// output, multiplier) — derives the combined conv rescale multiplier from the
// tensors' quantization scales.
142 const IPortableTensor *filterDescr,
143 const IPortableTensor *biasDescr,
144 const IPortableTensor *outputDescr, double *multiplier);
145
// Variant of QuantizeMultiplier for multipliers known to be > 1.
146void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
147 int *left_shift);
148
// NOTE(review): first line dropped by the listing; per the index this is
// GetQuantizedConvolutionMultipliersAndShifts(...) — the per-channel
// counterpart, filling one multiplier/shift pair per output channel.
150 float input_scale, float output_scale, const float *filter_scales, size_t filter_scales_size,
151 int num_channels, std::vector<int32_t> &per_channel_output_multiplier,
152 std::vector<int> &per_channel_output_shift);
153
// NOTE(review): first line dropped by the listing; per the index this is
// CalculateActivationRangeQuantized(activation, output, act_min, act_max) —
// computes the clamped quantized activation range for the output tensor.
155 int32_t *act_min, int32_t *act_max);
156
// True when both tensors have identical shapes.
157bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2);
158
// Largest input magnitude representable for a fixed-point exp-style op.
159int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);
160
// Byte size of a tensor with the given element type and dimensions.
161uint32_t sizeOfData(OperandType type, const std::vector<int32_t> &dimensions);
162
// Map an IR padding type onto the cker padding enum.
163nnfw::cker::PaddingType getPaddingType(ir::PaddingType ir_padding_type);
164
// Read the axes tensor's contents out into a plain vector for reducers.
165std::vector<int32_t> getReducerAxes(const IPortableTensor *axes);
166
168
169template <typename T> const T *getBuffer(const IPortableTensor *tensor)
170{
171 return reinterpret_cast<const T *>(tensor->buffer());
172}
173
174template <typename T> T *getBuffer(IPortableTensor *tensor)
175{
176 return reinterpret_cast<T *>(tensor->buffer());
177}
178
179template <> inline const bool *getBuffer(const IPortableTensor *tensor)
180{
181 static_assert(sizeof(bool) == 1, "cpu backend supports bool type which is 1 byte");
182 return reinterpret_cast<const bool *>(tensor->buffer());
183}
184
185template <> inline bool *getBuffer(IPortableTensor *tensor)
186{
187 static_assert(sizeof(bool) == 1, "cpu backend supports bool type which is 1 byte");
188 return reinterpret_cast<bool *>(tensor->buffer());
189}
190
191} // namespace onert::backend::cpu::ops
192
193#endif // __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
OperandType
Definition OperandType.h:24
int32_t * DimsData()
Definition Shape.h:112
A tensor class that is portable for other backends.
uint32_t getNumberOfElements(const Shape &shape)
Definition Shape.cpp:48
uint32_t getSizeOfDimension(const Shape &shape, uint32_t dimensionIdx)
Definition Shape.cpp:60
uint32_t getNumberOfDimensions(const Shape &shape)
Definition Shape.cpp:58
PaddingType
Definition Types.h:41
FusedActivationFunctionType
Definition Types.h:32
const T * getBuffer(const IPortableTensor *tensor)
int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift)
nnfw::cker::FusedActivationFunctionType convertActivationType(const ir::Activation activation)
nnfw::cker::RoPEMode getRoPEMode(ir::operation::RoPE::RoPEMode rope_mode)
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
void GetQuantizedConvolutionMultipliersAndShifts(float input_scale, float output_scale, const float *filter_scales, size_t filter_scales_size, int num_channels, std::vector< int32_t > &per_channel_output_multiplier, std::vector< int > &per_channel_output_shift)
int32_t getAxis(uint32_t rank, int32_t axis)
void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift)
uint32_t sizeOfData(OperandType type, const std::vector< int32_t > &dimensions)
void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier, int *left_shift)
std::vector< int32_t > getReducerAxes(const IPortableTensor *axes)
void CalculateActivationRangeQuantized(ir::Activation activation, const IPortableTensor *output, int32_t *act_min, int32_t *act_max)
nnfw::cker::Shape getExtendedTensorShape(const IPortableTensor *tensor)
void GetQuantizedConvolutionMultiplier(const IPortableTensor *input, const IPortableTensor *filter, const IPortableTensor *bias, const IPortableTensor *output, double *multiplier)
bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2)