ONE - On-device Neural Engine
PoolLayer.cc
/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "PoolLayer.h"
#include "OperationUtils.h"
#include "../Tensor.h"

#include <cker/Utils.h>
// cker kernel headers for the pooling implementations used below
// (paths assumed from the usages and the cker source layout)
#include <cker/operation/AveragePool.h>
#include <cker/train/operation/AveragePool.h>
#include <cker/train/operation/MaxPool.h>

namespace onert::backend::train::ops
{

namespace
{

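// The anonymous namespace below defines one training kernel per pool type. Each kernel
// pairs a cker forward implementation with its gradient counterpart and owns the
// intermediate state its backward pass needs (arg-max indices for max pooling, a saved
// activation output when an activation is fused).
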
class MaxPool2D final : public TrainingKernelRegistry
{
private:
  const ir::Activation _activation;
  const IPortableTensor *_output;
  // pooling parameters for the cker kernels (type per the cker pooling signatures)
  nnfw::cker::PoolParams _op_params;

  std::unique_ptr<Tensor> _act_back_prop_output;
  std::unique_ptr<Tensor> _arg_max_index;

public:
  MaxPool2D(const uint32_t paddingLeft, const uint32_t, const uint32_t paddingTop, const uint32_t,
            const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth,
            const uint32_t kernelHeight, const ir::Activation activation,
            const IPortableTensor *output)
    : _activation(activation), _output(output)
  {
    {
      _op_params.stride_height = strideHeight;
      _op_params.stride_width = strideWidth;
      _op_params.filter_height = kernelHeight;
      _op_params.filter_width = kernelWidth;
      assert(paddingTop < (1 << 15));
      assert(paddingLeft < (1 << 15));
      _op_params.padding_values.height = static_cast<int16_t>(paddingTop);
      _op_params.padding_values.width = static_cast<int16_t>(paddingLeft);
      CalculateActivationRange<float>(activation, &_op_params.float_activation_min,
                                      &_op_params.float_activation_max);
    }

    // The arg-max index tensor shares the output's shape; it records, for each output
    // element, which input element produced the maximum during forward.
    _arg_max_index = std::make_unique<Tensor>(_output->get_info());
    _arg_max_index->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));

    if (activation != ir::Activation::NONE)
    {
      _act_back_prop_output = std::make_unique<Tensor>(_output->get_info());
      _act_back_prop_output->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
    }
  }

  ~MaxPool2D() {}

public:
  void forward(const IPortableTensor *in, IPortableTensor *out)
  {
    auto out_shape = getShape(out);
    auto out_data = getBuffer<float>(out);
    auto arg_max_index = _arg_max_index.get();

    // maxpool forward
    nnfw::cker::train::MaxPool2D(_op_params, getShape(in), getBuffer<float>(in), out_shape,
                                 out_data, getBuffer<int>(arg_max_index));
  }

  void backward(const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
  {
    // activation backward
    try
    {
      back_prop_out =
        backpropActivation(_activation, _output, back_prop_out, _act_back_prop_output.get());
    }
    catch (const std::exception &e)
    {
      throw std::runtime_error{"train PoolLayer: " + std::string(e.what())};
    }
    assert(back_prop_out != nullptr);

    // maxpool backward
    auto arg_max_index = _arg_max_index.get();
    nnfw::cker::train::MaxPool2DGrad(getShape(back_prop_out), getBuffer<float>(back_prop_out),
                                     getBuffer<int>(arg_max_index), getShape(back_prop_in),
                                     getBuffer<float>(back_prop_in));
  }
};
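
// Note on the backward pass above: MaxPool2DGrad routes gradients rather than
// recomputing the pooling. A minimal sketch of the scatter it performs (hypothetical
// simplified form; the real cker kernel also handles shapes, strides, and batching):
//
//   // grad_in is zero-initialized; arg_max_index holds, per output element, the flat
//   // index of the input element that won the max during forward.
//   for (int i = 0; i < num_output_elements; ++i)
//     grad_in[arg_max_index[i]] += grad_out[i];
//
// For example, if a 2x2 window saw {1, 3, 2, 4}, forward recorded the index of 4, and
// backward adds that output position's gradient only at that index.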

class AveragePool2D final : public TrainingKernelRegistry
{
private:
  const ir::Activation _activation;
  const IPortableTensor *_output;
  // pooling parameters for the cker kernels (type per the cker pooling signatures)
  nnfw::cker::PoolParams _op_params;

  std::unique_ptr<Tensor> _act_back_prop_output;
  std::unique_ptr<Tensor> _arg_avg_index; // declared for symmetry with MaxPool2D; unused here

public:
  AveragePool2D(const uint32_t paddingLeft, const uint32_t, const uint32_t paddingTop,
                const uint32_t, const uint32_t strideWidth, const uint32_t strideHeight,
                const uint32_t kernelWidth, const uint32_t kernelHeight,
                const ir::Activation activation, const IPortableTensor *output)
    : _activation(activation), _output(output)
  {
    {
      _op_params.stride_height = strideHeight;
      _op_params.stride_width = strideWidth;
      _op_params.filter_height = kernelHeight;
      _op_params.filter_width = kernelWidth;
      assert(paddingTop < (1 << 15));
      assert(paddingLeft < (1 << 15));
      _op_params.padding_values.height = static_cast<int16_t>(paddingTop);
      _op_params.padding_values.width = static_cast<int16_t>(paddingLeft);
      CalculateActivationRange<float>(activation, &_op_params.float_activation_min,
                                      &_op_params.float_activation_max);
    }

    if (activation != ir::Activation::NONE)
    {
      _act_back_prop_output = std::make_unique<Tensor>(_output->get_info());
      _act_back_prop_output->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
    }
  }

  ~AveragePool2D() {}

public:
  void forward(const IPortableTensor *in, IPortableTensor *out)
  {
    auto out_shape = getShape(out);
    auto out_data = getBuffer<float>(out);

    // avgpool forward
    nnfw::cker::AveragePool<float>(_op_params, getShape(in), getBuffer<float>(in), out_shape,
                                   out_data);
  }

  void backward(const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
  {
    // activation backward
    try
    {
      back_prop_out =
        backpropActivation(_activation, _output, back_prop_out, _act_back_prop_output.get());
    }
    catch (const std::exception &e)
    {
      throw std::runtime_error{"train PoolLayer: " + std::string(e.what())};
    }
    assert(back_prop_out != nullptr);

    // averagepool backward
    nnfw::cker::train::AveragePool2DGrad(_op_params, getShape(back_prop_out),
                                         getBuffer<float>(back_prop_out), getShape(back_prop_in),
                                         getBuffer<float>(back_prop_in));
  }
};
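
// Note on the backward pass above: average pooling backward spreads each output
// gradient uniformly over the window that produced it. A minimal sketch of the
// per-window update (hypothetical simplified form; the real cker kernel also handles
// strides, padding, and batching):
//
//   // Each of the kernel_h * kernel_w inputs contributed equally to the average, so
//   // each receives an equal share of the gradient. grad_in is zero-initialized.
//   const float scale = 1.0f / (kernel_h * kernel_w);
//   for (/* each input position p inside the window of output element o */)
//     grad_in[p] += grad_out[o] * scale;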

} // namespace

PoolLayer::PoolLayer()
  : cpu::ops::PoolLayer(), _back_prop_input(nullptr), _back_prop_output(nullptr), _kernel(nullptr)
{
  // DO NOTHING
}

void PoolLayer::configureBackward(const uint32_t paddingLeft, const uint32_t paddingRight,
                                  const uint32_t paddingTop, const uint32_t paddingBottom,
                                  const uint32_t strideWidth, const uint32_t strideHeight,
                                  const uint32_t kernelWidth, const uint32_t kernelHeight,
                                  const ir::Activation activation, const PoolType op_type,
                                  IPortableTensor *output, IPortableTensor *back_prop_input,
                                  const IPortableTensor *back_prop_output)
{
  _back_prop_output = back_prop_output;
  _back_prop_input = back_prop_input;

  if (output->data_type() != OperandType::FLOAT32)
  {
    throw std::runtime_error("PoolLayer : Unsupported data type for training");
  }

  // ready training kernel
  switch (op_type)
  {
    case PoolType::kMax:
      _kernel = std::make_unique<MaxPool2D>(paddingLeft, paddingRight, paddingTop, paddingBottom,
                                            strideWidth, strideHeight, kernelWidth, kernelHeight,
                                            activation, output);
      break;
    case PoolType::kAvg:
      _kernel = std::make_unique<AveragePool2D>(paddingLeft, paddingRight, paddingTop,
                                                paddingBottom, strideWidth, strideHeight,
                                                kernelWidth, kernelHeight, activation, output);
      break;
    default:
      throw std::runtime_error("PoolLayer: Unsupported pool type");
  }
}
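
// Hypothetical usage sketch (the tensor variables and the forward-path configure call
// are assumptions, not part of this file): setting up a 2x2, stride-2 max pool for
// training might look like
//
//   auto layer = std::make_unique<PoolLayer>();
//   // ... forward-path configuration via the base cpu::ops::PoolLayer ...
//   layer->configureBackward(/*paddingLeft=*/0, /*paddingRight=*/0, /*paddingTop=*/0,
//                            /*paddingBottom=*/0, /*strideWidth=*/2, /*strideHeight=*/2,
//                            /*kernelWidth=*/2, /*kernelHeight=*/2, ir::Activation::NONE,
//                            PoolType::kMax, output, back_prop_input, back_prop_output);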

void PoolLayer::forward(bool training)
{
  if (training)
  {
    _kernel->forward(_input, _output);
  }
  else
  {
    // non-training path: assumed to delegate to the base cpu::ops::PoolLayer kernel
    cpu::ops::PoolLayer::run();
  }
}

void PoolLayer::backward() { _kernel->backward(_back_prop_output, _back_prop_input); }
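
// Note: backward() consumes state captured during forward(true): the arg-max indices
// for max pooling and, when an activation is fused, the forward output saved for
// backpropActivation. A training step therefore runs forward(true) before backward()
// on the same batch.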

} // namespace onert::backend::train::ops