ONE - On-device Neural Engine
ConvolutionLayer.cc
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ConvolutionLayer.h"
#include "OperationUtils.h"

#include "../Tensor.h"
#include "ir/Padding.h"
#include <cker/operation/Conv.h>

namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{
ConvolutionLayer::ConvolutionLayer()
  : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr),
    _paddingType(ir::PaddingType::EXPLICIT), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
    _paddingBottom(0), _strideWidth(0), _strideHeight(0), _dilationWidthFactor(1),
    _dilationHeightFactor(1), _activation(ir::Activation::NONE),
    _conv_kernel(new nnfw::cker::Conv()), _prepare(false), _is_cachable_weights(false),
    _is_hybrid(false)
{
  // DO NOTHING
}

ConvolutionLayer::~ConvolutionLayer() = default;

void ConvolutionLayer::convFloat32()
{
  float output_activation_min = 0, output_activation_max = 0;
  CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);

  nnfw::cker::ConvParams op_params;
  op_params.padding_type = getPaddingType(_paddingType);
  op_params.padding_values.width = _paddingLeft;
  op_params.padding_values.height = _paddingTop;
  op_params.stride_width = _strideWidth;
  op_params.stride_height = _strideHeight;
  op_params.dilation_width_factor = _dilationWidthFactor;
  op_params.dilation_height_factor = _dilationHeightFactor;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;

  nnfw::cker::Conv &kernel = *_conv_kernel;
  kernel(op_params, getShape(_input), getBuffer<float>(_input), getShape(_kernel),
         getBuffer<float>(_kernel), getShape(_bias), getBuffer<float>(_bias), getShape(_output),
         getBuffer<float>(_output));
}

void ConvolutionLayer::convQ8uPerTensor()
{
  int32_t output_activation_min = 0;
  int32_t output_activation_max = 0;
  CalculateActivationRangeQuantized(_activation, _output, &output_activation_min,
                                    &output_activation_max);

  // Fold the input, filter, and output scales into a fixed-point multiplier and shift.
  double real_multiplier = 0.0;
  int32_t output_multiplier = 0;
  int32_t output_shift = 0;
  GetQuantizedConvolutionMultiplier(_input, _kernel, _bias, _output, &real_multiplier);
  QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);

  nnfw::cker::ConvParams op_params;
  op_params.stride_width = _strideWidth;
  op_params.stride_height = _strideHeight;
  op_params.dilation_width_factor = _dilationWidthFactor;
  op_params.dilation_height_factor = _dilationHeightFactor;
  op_params.padding_type = getPaddingType(_paddingType);
  op_params.padding_values.width = _paddingLeft;
  op_params.padding_values.height = _paddingTop;
  op_params.input_offset = -_input->data_zero_point();
  op_params.weights_offset = -_kernel->data_zero_point();
  op_params.output_offset = _output->data_zero_point();
  op_params.output_multiplier = output_multiplier;
  op_params.output_shift = output_shift;
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;
  op_params.is_replaced_weights = true;

  nnfw::cker::Conv &kernel = *_conv_kernel;
  kernel(op_params, getShape(_input), getBuffer<uint8_t>(_input), getShape(_kernel),
         getBuffer<uint8_t>(_kernel), getShape(_bias), getBuffer<int32_t>(_bias), getShape(_output),
         getBuffer<uint8_t>(_output));
}

void ConvolutionLayer::convQ8uPerChannel()
{
  nnfw::cker::ConvParams op_params;
  op_params.padding_values.width = _paddingLeft;
  op_params.padding_values.height = _paddingTop;
  op_params.stride_width = _strideWidth;
  op_params.stride_height = _strideHeight;
  op_params.dilation_width_factor = _dilationWidthFactor;
  op_params.dilation_height_factor = _dilationHeightFactor;
  op_params.input_offset = -_input->data_zero_point();
  op_params.output_offset = _output->data_zero_point();
  int32_t output_activation_min = 0;
  int32_t output_activation_max = 0;
  CalculateActivationRangeQuantized(_activation, _output, &output_activation_min,
                                    &output_activation_max);
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;
  // NOTE: The following fields of ConvParams are not used:
  //       padding_type, weights_offset, output_{multiplier,shift}, float_activation_{min,max}

  nnfw::cker::Conv &kernel = *_conv_kernel;
  kernel(op_params, getShape(_input), getBuffer<uint8_t>(_input), getShape(_kernel),
         getBuffer<uint8_t>(_kernel), _kernel->data_zero_points().data(), getShape(_bias),
         getBuffer<int32_t>(_bias), getShape(_output), getBuffer<uint8_t>(_output));
}

void ConvolutionLayer::convQ8i()
{
  int32_t output_activation_min = 0;
  int32_t output_activation_max = 0;
  CalculateActivationRangeQuantized(_activation, _output, &output_activation_min,
                                    &output_activation_max);

  nnfw::cker::ConvParams op_params;
  op_params.input_offset = -_input->data_zero_point();
  op_params.output_offset = _output->data_zero_point();
  op_params.stride_height = _strideHeight;
  op_params.stride_width = _strideWidth;
  op_params.dilation_height_factor = _dilationHeightFactor;
  op_params.dilation_width_factor = _dilationWidthFactor;
  op_params.padding_values.height = _paddingTop;
  op_params.padding_values.width = _paddingLeft;
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;

  nnfw::cker::Conv &kernel = *_conv_kernel;
  kernel(op_params, getShape(_input), reinterpret_cast<const int8_t *>(_input->buffer()),
         getShape(_kernel), reinterpret_cast<const int8_t *>(_kernel->buffer()), getShape(_bias),
         reinterpret_cast<const int32_t *>(_bias->buffer()), getShape(_output),
         reinterpret_cast<int8_t *>(_output->buffer()));
}

void ConvolutionLayer::convQ8iHybridPerChannel()
{
  float output_activation_min = 0;
  float output_activation_max = 0;
  CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);

  const int batch_size = getShape(_input).Dims(0);
  if (batch_size == 0)
    throw std::runtime_error{"Convolution input batch_size = 0"};
  auto input_shape = getShape(_input);
  const int input_size = input_shape.FlatSize() / batch_size;

  // Quantize the float activations batch by batch; each batch gets its own scale and offset.
  auto input_quantized_ptr = _hybrid_arena->input_quantized.data();
  auto input_scaling_factors_ptr = _hybrid_arena->input_scaling_factors.data();
  auto input_offsets_ptr = _hybrid_arena->input_offsets.data();
  for (int b = 0; b < batch_size; ++b)
  {
    const int offset = b * input_size;
    nnfw::cker::PortableAsymmetricQuantizeFloats(
      reinterpret_cast<const float *>(_input->buffer()) + offset, input_size,
      input_quantized_ptr + offset, &input_scaling_factors_ptr[b], &input_offsets_ptr[b]);
  }
  nnfw::cker::ConvParams op_params;
  op_params.padding_type = getPaddingType(_paddingType);
  op_params.padding_values.width = _paddingLeft;
  op_params.padding_values.height = _paddingTop;
  op_params.stride_width = _strideWidth;
  op_params.stride_height = _strideHeight;
  op_params.dilation_width_factor = _dilationWidthFactor;
  op_params.dilation_height_factor = _dilationHeightFactor;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;

  const auto *filter_per_channel_scales = _kernel->data_scales().data();
  nnfw::cker::reference::HybridConvPerChannel(
    op_params, input_scaling_factors_ptr, getShape(_input), input_quantized_ptr, getShape(_kernel),
    reinterpret_cast<const int8_t *>(_kernel->buffer()), getShape(_bias),
    reinterpret_cast<const float *>(_bias->buffer()), getShape(_output),
    reinterpret_cast<float *>(_output->buffer()), filter_per_channel_scales, input_offsets_ptr);
}

void ConvolutionLayer::configure(const IPortableTensor *input, const IPortableTensor *kernel,
                                 const IPortableTensor *bias, const ir::PaddingType paddingType,
                                 const uint32_t paddingLeft, const uint32_t paddingRight,
                                 const uint32_t paddingTop, const uint32_t paddingBottom,
                                 const uint32_t strideWidth, const uint32_t strideHeight,
                                 const uint32_t dilationWidthFactor,
                                 const uint32_t dilationHeightFactor,
                                 const ir::Activation activation, IPortableTensor *output,
                                 bool is_cachable_weights)
{
  _input = input;
  _kernel = kernel;
  _bias = bias;
  _paddingType = paddingType;
  _paddingLeft = paddingLeft;
  _paddingRight = paddingRight;
  _paddingTop = paddingTop;
  _paddingBottom = paddingBottom;
  _strideWidth = strideWidth;
  _strideHeight = strideHeight;
  _dilationWidthFactor = dilationWidthFactor;
  _dilationHeightFactor = dilationHeightFactor;
  _activation = activation;
  _output = output;
  _is_cachable_weights = is_cachable_weights;
  // Hybrid convolution: float activations with symmetric int8 quantized weights.
  _is_hybrid = _input->data_type() == OperandType::FLOAT32 &&
               _kernel->data_type() == OperandType::QUANT_INT8_SYMM;
}

void ConvolutionLayer::run()
{
  prepare();
  if (_input->is_dynamic() || _kernel->is_dynamic())
  {
    // Input or kernel shape is dynamic, so recompute the explicit padding from the current shapes.
    const auto ifm_shape = _input->getShape().asFeature();
    const auto ofm_shape = _output->getShape().asFeature();
    // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
    const auto ker_shape = _kernel->getShape();
    const auto ker_height = ker_shape.dim(1);
    const auto ker_width = ker_shape.dim(2);

    ir::Stride stride;
    stride.vertical = _strideHeight;
    stride.horizontal = _strideWidth;

    ir::Padding param_padding;
    param_padding.type = _paddingType;
    param_padding.param.left = _paddingLeft;
    param_padding.param.right = _paddingRight;
    param_padding.param.top = _paddingTop;
    param_padding.param.bottom = _paddingBottom;

    const auto padding =
      ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
                           _dilationWidthFactor, _dilationHeightFactor);

    _paddingLeft = padding.left;
    _paddingRight = padding.right;
    _paddingTop = padding.top;
    _paddingBottom = padding.bottom;
  }
  if (_is_hybrid)
  {
    convQ8iHybridPerChannel();
  }
  else if (_input->data_type() == OperandType::FLOAT32)
  {
    convFloat32();
  }
  else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
  {
    const bool per_channel_quantized = _kernel->data_scales().size() > 1;
    if (per_channel_quantized)
      convQ8uPerChannel();
    else
      convQ8uPerTensor();
  }
  else if (_input->data_type() == OperandType::QUANT_INT8_ASYMM)
  {
    convQ8i();
  }
  else
  {
    throw std::runtime_error{"Conv: unsupported data type"};
  }
}

void ConvolutionLayer::prepare()
{
  if (_prepare)
    return;

  if (_is_hybrid)
  {
    // Ensure the weight is per-channel quantized.
    int32_t kernel_output_channel = getShape(_kernel).Dims(0);
    // zero_points comes from a flatbuffer vector, so its size is within uint32_t range.
    size_t kernel_zerop_cnt = _kernel->data_scales().size();
    // Promote to int64_t to compare int32_t and uint32_t.
    if ((int64_t)kernel_output_channel != (int64_t)kernel_zerop_cnt)
      throw std::runtime_error{"Conv2D hybrid supports only per-channel quantized weight."};

    // Allocate memory for activation quantization:
    // - quantized values (int8_t type, same shape as the original input)
    // - quantization params (= scale/zero point for each input batch)
    auto input_shape = getShape(_input);
    const int batch_size = input_shape.Dims(0);
    const int input_size = input_shape.FlatSize() / batch_size;
    _hybrid_arena = std::make_unique<nnfw::cker::ConvHybridTempArena>(batch_size, input_size);
    _prepare = true;
    return;
  }

  nnfw::cker::Conv &kernel = *_conv_kernel;
  if (_input->data_type() == OperandType::FLOAT32 && _is_cachable_weights)
  {
    bool is_transposed = false;
    kernel.prepareF32(getShape(_kernel), getBuffer<float>(_kernel), getPaddingType(_paddingType),
                      is_transposed, _dilationWidthFactor, _dilationHeightFactor);

    // Decrease reference of _kernel (weights) only when _kernel is constant.
    if (is_transposed)
    {
      auto kernel_tensor = dynamic_cast<const Tensor *>(_kernel);
      if (kernel_tensor)
        // TODO Remove const_cast
        const_cast<Tensor *>(kernel_tensor)->decrease_ref();
    }
  }
  else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM && _is_cachable_weights &&
           !_input->is_dynamic() && !_output->is_dynamic())
  {
    const bool per_channel_quantized = _kernel->data_scales().size() > 1;
    if (per_channel_quantized)
    {
      GetQuantizedConvolutionMultipliersAndShifts(
        _input->data_scale(), _output->data_scale(), _kernel->data_scales().data(),
        _kernel->data_scales().size(), getShape(_kernel).Dims(0),
        kernel.per_channel_output_multiplier(), kernel.per_channel_output_shift());
    }
    else
    {
      kernel.prepareQ8uPerTensor(getShape(_input), getShape(_kernel), getShape(_output),
                                 _strideWidth, _strideHeight, _dilationWidthFactor,
                                 _dilationHeightFactor);
    }
  }
  else if (_input->data_type() == OperandType::QUANT_INT8_ASYMM)
  {
    if (_is_cachable_weights && !_input->is_dynamic() && !_output->is_dynamic())
    {
      GetQuantizedConvolutionMultipliersAndShifts(
        _input->data_scale(), _output->data_scale(), _kernel->data_scales().data(),
        _kernel->data_scales().size(), getShape(_kernel).Dims(0),
        kernel.per_channel_output_multiplier(), kernel.per_channel_output_shift());
    }
    else
    {
      throw std::runtime_error{"Conv2D: Int8 dynamic weight is not supported"};
    }
  }
  _prepare = true;
}

} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert
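The per-tensor quantized path (convQ8uPerTensor) reduces requantization to an integer multiply and shift: GetQuantizedConvolutionMultiplier folds the input, filter, and output scales into one real multiplier (input_scale * filter_scale / output_scale), and QuantizeMultiplier splits that value into output_multiplier and output_shift such that real_multiplier is approximately output_multiplier * 2^(output_shift - 31). The sketch below is a standalone illustration of that standard fixed-point decomposition, not the onert/cker implementation; DecomposeMultiplier and the 0.0037 example value are hypothetical.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Decompose a positive real multiplier into a Q0.31 fixed-point multiplier and a power-of-two
// shift so that real_multiplier ~= quantized_multiplier * 2^(shift - 31).
void DecomposeMultiplier(double real_multiplier, int32_t *quantized_multiplier, int *shift)
{
  if (real_multiplier == 0.0)
  {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
  // std::frexp splits the value into a fraction in [0.5, 1) and a base-2 exponent.
  const double fraction = std::frexp(real_multiplier, shift);
  auto fixed = static_cast<int64_t>(std::round(fraction * (1ll << 31)));
  if (fixed == (1ll << 31)) // rounding may push the fraction up to exactly 1.0
  {
    fixed /= 2;
    ++(*shift);
  }
  *quantized_multiplier = static_cast<int32_t>(fixed);
}

int main()
{
  // Hypothetical scale product: input_scale * filter_scale / output_scale == 0.0037
  const double real_multiplier = 0.0037;
  int32_t multiplier = 0;
  int shift = 0;
  DecomposeMultiplier(real_multiplier, &multiplier, &shift);
  // Reconstruct the real multiplier to show that the decomposition round-trips.
  const double reconstructed = static_cast<double>(multiplier) * std::ldexp(1.0, shift - 31);
  std::printf("multiplier=%d shift=%d reconstructed=%.9f\n", static_cast<int>(multiplier), shift,
              reconstructed);
  return 0;
}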
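Before calling the hybrid kernel, convQ8iHybridPerChannel() quantizes the float activations on the fly, one batch at a time, so each batch carries its own scaling factor and zero point alongside the per-channel filter scales. Below is a minimal sketch of that standard asymmetric int8 quantization step; AsymmetricQuantize and the sample batch are illustrative only and do not reproduce the exact rounding of cker's PortableAsymmetricQuantizeFloats.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Quantize a block of float activations to int8 with a per-block scale and zero point.
void AsymmetricQuantize(const float *values, int size, int8_t *quantized, float *scale,
                        int32_t *zero_point)
{
  const auto minmax = std::minmax_element(values, values + size);
  // Extend the range to include 0 so that zero stays exactly representable.
  const float rmin = std::min(0.0f, *minmax.first);
  const float rmax = std::max(0.0f, *minmax.second);
  constexpr int32_t qmin = -128, qmax = 127;
  *scale = (rmax == rmin) ? 1.0f : (rmax - rmin) / static_cast<float>(qmax - qmin);
  const float zero_point_real = static_cast<float>(qmin) - rmin / *scale;
  *zero_point = static_cast<int32_t>(std::lround(
    std::min(std::max(zero_point_real, static_cast<float>(qmin)), static_cast<float>(qmax))));
  for (int i = 0; i < size; ++i)
  {
    const int32_t q = *zero_point + static_cast<int32_t>(std::lround(values[i] / *scale));
    quantized[i] = static_cast<int8_t>(std::max(qmin, std::min(qmax, q)));
  }
}

int main()
{
  // One batch of activations; real inputs would be one batch slice of the Conv input tensor.
  const std::vector<float> batch{-1.0f, 0.0f, 0.5f, 2.0f};
  std::vector<int8_t> quantized(batch.size());
  float scale = 0.0f;
  int32_t zero_point = 0;
  AsymmetricQuantize(batch.data(), static_cast<int>(batch.size()), quantized.data(), &scale,
                     &zero_point);
  return 0;
}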