ONE - On-device Neural Engine
Loading...
Searching...
No Matches
ConvolutionLayer.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "ConvolutionLayer.h"
18
19#include "../Tensor.h"
20#include "ir/Padding.h"
21
22namespace onert
23{
24namespace backend
25{
26namespace ruy
27{
28namespace ops
29{
31 : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr),
32 _paddingType(ir::PaddingType::EXPLICIT), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
33 _paddingBottom(0), _strideWidth(0), _strideHeight(0), _dilationWidthFactor(1),
34 _dilationHeightFactor(1), _activation(ir::Activation::NONE),
35 _conv_kernel(new nnfw::ruy::Conv()), _prepare(false)
36{
37 // DO NOTHING
38}
39
41
43{
44 float output_activation_min = 0, output_activation_max = 0;
45 CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
46
47 nnfw::ruy::ConvParams op_params;
48 op_params.padding_type = getPaddingType(_paddingType);
49 op_params.padding_values.width = _paddingLeft;
50 op_params.padding_values.height = _paddingTop;
51 op_params.stride_width = _strideWidth;
52 op_params.stride_height = _strideHeight;
53 op_params.dilation_width_factor = _dilationWidthFactor;
54 op_params.dilation_height_factor = _dilationHeightFactor;
55 op_params.float_activation_min = output_activation_min;
56 op_params.float_activation_max = output_activation_max;
57
58 nnfw::ruy::Conv &kernel = *_conv_kernel;
59 kernel(op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
60 getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()),
61 getTensorShape(_bias), reinterpret_cast<const float *>(_bias->buffer()),
62 getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()),
63 _external_context->ruy_context());
64}
65
67 const IPortableTensor *bias, const ir::PaddingType paddingType,
68 const uint32_t paddingLeft, const uint32_t paddingRight,
69 const uint32_t paddingTop, const uint32_t paddingBottom,
70 const uint32_t strideWidth, const uint32_t strideHeight,
71 const uint32_t dilationWidthFactor,
72 const uint32_t dilationHeightFactor,
73 const ir::Activation activation, IPortableTensor *output,
74 const std::shared_ptr<ExternalContext> &external_context)
75{
76 _input = input;
77 _kernel = kernel;
78 _bias = bias;
79 _paddingType = paddingType;
80 _paddingLeft = paddingLeft;
81 _paddingRight = paddingRight;
82 _paddingTop = paddingTop;
83 _paddingBottom = paddingBottom;
84 _strideWidth = strideWidth;
85 _strideHeight = strideHeight;
86 _dilationWidthFactor = dilationWidthFactor;
87 _dilationHeightFactor = dilationHeightFactor;
88 _activation = activation;
89 _output = output;
90 _external_context = external_context;
91}
92
94{
95 prepare();
96
97 if (_input->is_dynamic() || _kernel->is_dynamic())
98 {
99 const auto ifm_shape = _input->getShape().asFeature();
100 const auto ofm_shape = _output->getShape().asFeature();
101 // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
102 const auto ker_shape = _kernel->getShape();
103 const auto ker_height = ker_shape.dim(1);
104 const auto ker_width = ker_shape.dim(2);
105
106 ir::Stride stride;
107 stride.vertical = _strideWidth;
108 stride.horizontal = _strideWidth;
109
110 ir::Padding param_padding;
111 param_padding.type = _paddingType;
112 param_padding.param.left = _paddingLeft;
113 param_padding.param.right = _paddingRight;
114 param_padding.param.top = _paddingTop;
115 param_padding.param.bottom = _paddingBottom;
116
117 const auto padding =
118 ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
119 _dilationWidthFactor, _dilationHeightFactor);
120
121 _paddingLeft = padding.left;
122 _paddingRight = padding.right;
123 _paddingTop = padding.top;
124 _paddingBottom = padding.bottom;
125 }
126 if (_input->data_type() == OperandType::FLOAT32)
127 {
128 convFloat32();
129 }
130 else
131 {
132 throw std::runtime_error{"Conv: unsupported data type"};
133 }
134}
135
137{
138 if (_prepare)
139 return;
140
141 nnfw::ruy::Conv &kernel = *_conv_kernel;
142 if (_input->data_type() == OperandType::FLOAT32 && _kernel->is_constant())
143 {
144 kernel.prepare(getTensorShape(_input), getTensorShape(_kernel), getTensorShape(_output),
145 _strideWidth, _strideHeight, _dilationWidthFactor, _dilationHeightFactor);
146 }
147 _prepare = true;
148}
149
150} // namespace ops
151} // namespace ruy
152} // namespace backend
153} // namespace onert
void Conv(const float *input_data, const Dims< 4 > &input_dims, const float *filter_data, const Dims< 4 > &filter_dims, const float *bias_data, const Dims< 4 > &bias_dims, int stride_width, int stride_height, int pad_width, int pad_height, float *output_data, const Dims< 4 > &output_dims, float *im2col_data, const Dims< 4 > &im2col_dims)
void prepare(const Shape &input_shape, const Shape &kernel_shape, const Shape &output_shape, uint32_t stride_width, uint32_t stride_height, uint32_t dilation_width_factor, uint32_t dilation_height_factor)
Definition Conv.h:41
A tensor class that is portable for other backends.
ir::DataType data_type() const override final
ir::Shape getShape() const override final
Get ir::Shape of tensor.
bool is_dynamic() const override final
Return true if the tensor needs dynamic allocation, meaning that during compile-time the output shape...
bool is_constant() const override final
Return true if the tensor is constant.
virtual uint8_t * buffer() const =0
void configure(const IPortableTensor *input, const IPortableTensor *kernel, const IPortableTensor *bias, ir::PaddingType _paddingType, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t dilationWidthFactor, const uint32_t dilationHeightFactor, const ir::Activation activation, IPortableTensor *output, const std::shared_ptr< ExternalContext > &external_context)
Definition topk_v2.h:30
nnfw::ruy::Shape getTensorShape(const IPortableTensor *tensor)
const ExplicitPadding calculatePadding(const Padding &padding, const FeatureShape &ifm_shape, const FeatureShape &ofm_shape, const Stride &stride, uint32_t kw, uint32_t kh, uint32_t dwf=1, uint32_t dhf=1)
Definition Padding.cc:133
void CalculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
float float_activation_max
Definition Types.h:77
PaddingType padding_type
Definition Types.h:58
PaddingValues padding_values
Definition Types.h:59
float float_activation_min
Definition Types.h:76
int16_t dilation_height_factor
Definition Types.h:64
int16_t stride_width
Definition Types.h:61
int16_t dilation_width_factor
Definition Types.h:63
int16_t stride_height
Definition Types.h:62
PaddingType type
Definition Padding.h:61
ExplicitPadding param
Definition Padding.h:62