ONE - On-device Neural Engine
Loading...
Searching...
No Matches
OperationUtils.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
18#define __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
19
21#include <ir/DataType.h>
22#include <ir/Operand.h>
23#include <ir/Padding.h>
24#include <ir/operation/RoPE.h>
26
27#include <cker/Shape.h>
28#include <cker/Types.h>
29
30#include <limits>
31#include <vector>
32
// NOTE(review): `using namespace` at header scope injects onert::util into
// every translation unit that includes this header; prefer qualified names.
using namespace onert::util;
35
36namespace onert
37{
38namespace backend
39{
40namespace cpu
41{
42namespace ops
43{
44
// Type-punning view over a mutable tensor buffer: every member aliases the
// same address, so one buffer can be accessed as whichever element type an
// operation needs without scattering casts through kernel code.
union DataPtr {
  uint8_t *u8;   // unsigned 8-bit elements
  int8_t *i8;    // signed 8-bit elements
  uint32_t *u32; // unsigned 32-bit elements
  int32_t *i32;  // signed 32-bit elements
  bool *b;       // boolean elements
  float *f;      // 32-bit floating-point elements
  int64_t *i64;  // signed 64-bit elements
  void *v;       // untyped view of the same address
};
55
57 const uint8_t *u8;
58 const int8_t *i8;
59 const uint32_t *u32;
60 const int32_t *i32;
61 const bool *b;
62 const float *f;
63 const int64_t *i64;
64 const void *v;
65};
66
// Returns the rank (number of dimensions) of `tensor`.
uint32_t getNumberOfDimensions(const IPortableTensor *tensor);

// Returns the total element count (product of all dimensions) of `tensor`.
uint32_t getNumberOfElements(const IPortableTensor *tensor);

// Returns the extent of the dimension at `dimensionIdx` of `tensor`.
uint32_t getSizeOfDimension(const IPortableTensor *tensor, uint32_t dimensionIdx);
72
{
  // Pads the tensor's shape with leading 1s up to rank 4, e.g. a rank-2
  // shape {H, W} becomes {1, 1, H, W}.
  assert(tensor);
  const int32_t extended_rank = 4;
  int32_t raw_shape[extended_rank];
  auto shape = tensor->getShape();
  // Number of leading dimensions to fill with 1.
  // NOTE(review): if shape.rank() > 4 this unsigned subtraction wraps, making
  // every `i < src` check true and silently producing {1,1,1,1} — presumably
  // callers only pass tensors of rank <= 4; confirm.
  uint32_t src = extended_rank - shape.rank();
  for (uint32_t i = 0; i < extended_rank; ++i)
  {
    if (i < src)
    {
      raw_shape[i] = 1; // leading padding dimension
    }
    else
    {
      raw_shape[i] = shape.dim(i - src); // copy the original dimension
    }
  }

  return nnfw::cker::Shape(extended_rank, raw_shape);
}
94
{
  // A null tensor maps to an empty (rank-0) shape rather than crashing.
  if (tensor == nullptr)
    return nnfw::cker::Shape();

  // Copy each IR dimension into a cker Shape of the same rank.
  const ir::Shape &shape = tensor->get_info().shape();
  auto rank = shape.rank();
  nnfw::cker::Shape ret(rank);
  auto data = ret.DimsData();
  for (int i = 0; i < rank; ++i)
  {
    data[i] = shape.dim(i);
  }
  return ret;
}
110
113{
114 switch (activation)
115 {
128 default:
129 throw std::runtime_error{"CPU backend: Cannot convert activation type"};
130 }
131}
132
// Normalizes a possibly-negative axis index: negative values count back from
// the end (so -1 maps to rank - 1), non-negative values pass through unchanged.
// No range validation is performed; out-of-range inputs are returned as-is
// after the adjustment.
inline int32_t getAxis(uint32_t rank, int32_t axis)
{
  if (axis >= 0)
  {
    return axis;
  }
  return axis + static_cast<int32_t>(rank);
}
144
// Decomposes `double_multiplier` into a 32-bit fixed-point mantissa
// (`quantized_multiplier`) and a power-of-two exponent (`shift`), for use by
// quantized kernels. NOTE(review): exact normalization range is defined in
// the implementation file — confirm there.
void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
146
148 const IPortableTensor *filterDescr,
149 const IPortableTensor *biasDescr,
150 const IPortableTensor *outputDescr, double *multiplier);
151
// Same fixed-point decomposition as QuantizeMultiplier, but for multipliers
// expected to be greater than one (the resulting exponent is a left shift).
// NOTE(review): the precondition on `double_multiplier` is enforced in the
// implementation file — confirm there.
void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
                                      int *left_shift);
154
156 float input_scale, float output_scale, const float *filter_scales, size_t filter_scales_size,
157 int num_channels, std::vector<int32_t> &per_channel_output_multiplier,
158 std::vector<int> &per_channel_output_shift);
159
161 int32_t *act_min, int32_t *act_max);
162
// Returns true when `input1` and `input2` have identical shapes.
bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2);

// Computes the largest input magnitude that still yields a meaningful result
// in fixed-point activation math. NOTE(review): semantics presumably follow
// the cker/TFLite helper of the same name — confirm in the .cc.
int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);

// Returns the size in bytes of a tensor with element type `type` and the
// given `dimensions`.
uint32_t sizeOfData(OperandType type, const std::vector<int32_t> &dimensions);

// Maps an IR padding type to the equivalent cker padding type.
nnfw::cker::PaddingType getPaddingType(ir::PaddingType ir_padding_type);

// Reads the contents of the `axes` tensor into a vector of reduction axes.
std::vector<int32_t> getReducerAxes(const IPortableTensor *axes);
172
174
175template <typename T> const T *getBuffer(const IPortableTensor *tensor)
176{
177 return reinterpret_cast<const T *>(tensor->buffer());
178}
179
180template <typename T> T *getBuffer(IPortableTensor *tensor)
181{
182 return reinterpret_cast<T *>(tensor->buffer());
183}
184
185template <> inline const bool *getBuffer(const IPortableTensor *tensor)
186{
187 static_assert(sizeof(bool) == 1, "cpu backend supports bool type which is 1 byte");
188 return reinterpret_cast<const bool *>(tensor->buffer());
189}
190
191template <> inline bool *getBuffer(IPortableTensor *tensor)
192{
193 static_assert(sizeof(bool) == 1, "cpu backend supports bool type which is 1 byte");
194 return reinterpret_cast<bool *>(tensor->buffer());
195}
196
197} // namespace ops
198} // namespace cpu
199} // namespace backend
200} // namespace onert
201
202#endif // __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
OperandType
Definition OperandType.h:24
int32_t * DimsData()
Definition Shape.h:112
A tensor class that is portable for other backends.
uint32_t getNumberOfElements(const Shape &shape)
Definition Shape.cpp:48
uint32_t getSizeOfDimension(const Shape &shape, uint32_t dimensionIdx)
Definition Shape.cpp:60
uint32_t getNumberOfDimensions(const Shape &shape)
Definition Shape.cpp:58
PaddingType
Definition Types.h:41
FusedActivationFunctionType
Definition Types.h:32
const T * getBuffer(const IPortableTensor *tensor)
int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift)
nnfw::cker::FusedActivationFunctionType convertActivationType(const ir::Activation activation)
nnfw::cker::RoPEMode getRoPEMode(ir::operation::RoPE::RoPEMode rope_mode)
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
void GetQuantizedConvolutionMultipliersAndShifts(float input_scale, float output_scale, const float *filter_scales, size_t filter_scales_size, int num_channels, std::vector< int32_t > &per_channel_output_multiplier, std::vector< int > &per_channel_output_shift)
int32_t getAxis(uint32_t rank, int32_t axis)
void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift)
uint32_t sizeOfData(OperandType type, const std::vector< int32_t > &dimensions)
void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier, int *left_shift)
std::vector< int32_t > getReducerAxes(const IPortableTensor *axes)
void CalculateActivationRangeQuantized(ir::Activation activation, const IPortableTensor *output, int32_t *act_min, int32_t *act_max)
nnfw::cker::Shape getExtendedTensorShape(const IPortableTensor *tensor)
void GetQuantizedConvolutionMultiplier(const IPortableTensor *input, const IPortableTensor *filter, const IPortableTensor *bias, const IPortableTensor *output, double *multiplier)
bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2)