ONE - On-device Neural Engine
Loading...
Searching...
No Matches
PoolLayer.cc
Go to the documentation of this file.
/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include "PoolLayer.h"
#include "OperationUtils.h"
#include "../Tensor.h"

#include <cker/Utils.h>
#include <cker/operation/AveragePool.h>
#include <cker/train/operation/AveragePool.h>
#include <cker/train/operation/MaxPool.h>
namespace onert
{
namespace backend
{
namespace train
{
namespace ops
{

namespace
{

39class MaxPool2D final : public TrainingKernelRegistry
40{
41private:
42 const ir::Activation _activation;
43 const IPortableTensor *_output;
45
46 std::unique_ptr<Tensor> _act_back_prop_output;
47 std::unique_ptr<Tensor> _arg_max_index;
48
49public:
50 MaxPool2D(const uint32_t paddingLeft, const uint32_t, const uint32_t paddingTop, const uint32_t,
51 const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth,
52 const uint32_t kernelHeight, const ir::Activation activation,
53 const IPortableTensor *output)
54 : _activation(activation), _output(output)
55 {
56 {
57 _op_params.stride_height = strideHeight;
58 _op_params.stride_width = strideWidth;
59 _op_params.filter_height = kernelHeight;
60 _op_params.filter_width = kernelWidth;
61 assert(paddingTop < (1 << 15));
62 assert(paddingLeft < (1 << 15));
63 _op_params.padding_values.height = static_cast<int16_t>(paddingTop);
64 _op_params.padding_values.width = static_cast<int16_t>(paddingLeft);
65 CalculateActivationRange<float>(activation, &_op_params.float_activation_min,
67 }
68
69 _arg_max_index = std::make_unique<Tensor>(_output->get_info());
70 _arg_max_index->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
71
72 if (activation != ir::Activation::NONE)
73 {
74 _act_back_prop_output = std::make_unique<Tensor>(_output->get_info());
75 _act_back_prop_output->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
76 }
77 };
78
79 ~MaxPool2D() {}
80
81public:
82 void forward(const IPortableTensor *in, IPortableTensor *out)
83 {
84 auto out_shape = getShape(out);
85 auto out_data = getBuffer<float>(out);
86 auto arg_max_index = _arg_max_index.get();
87
88 // maxpool forward
89 nnfw::cker::train::MaxPool2D(_op_params, getShape(in), getBuffer<float>(in), out_shape,
90 out_data, getBuffer<int>(arg_max_index));
91 }
92
93 void backward(const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
94 {
95 // activation backward
96 try
97 {
98 back_prop_out =
99 backpropActivation(_activation, _output, back_prop_out, _act_back_prop_output.get());
100 }
101 catch (const std::exception &e)
102 {
103 throw std::runtime_error{"train PoolLayer: " + std::string(e.what())};
104 }
105 assert(back_prop_out != nullptr);
106
107 // maxpool baackward
108 auto arg_max_index = _arg_max_index.get();
109 nnfw::cker::train::MaxPool2DGrad(getShape(back_prop_out), getBuffer<float>(back_prop_out),
110 getBuffer<int>(arg_max_index), getShape(back_prop_in),
111 getBuffer<float>(back_prop_in));
112 }
113};
114
115class AveragePool2D final : public TrainingKernelRegistry
116{
117private:
118 const ir::Activation _activation;
119 const IPortableTensor *_output;
121
122 std::unique_ptr<Tensor> _act_back_prop_output;
123 std::unique_ptr<Tensor> _arg_avg_index;
124
125public:
126 AveragePool2D(const uint32_t paddingLeft, const uint32_t, const uint32_t paddingTop,
127 const uint32_t, const uint32_t strideWidth, const uint32_t strideHeight,
128 const uint32_t kernelWidth, const uint32_t kernelHeight,
129 const ir::Activation activation, const IPortableTensor *output)
130 : _activation(activation), _output(output)
131 {
132 {
133 _op_params.stride_height = strideHeight;
134 _op_params.stride_width = strideWidth;
135 _op_params.filter_height = kernelHeight;
136 _op_params.filter_width = kernelWidth;
137 assert(paddingTop < (1 << 15));
138 assert(paddingLeft < (1 << 15));
139 _op_params.padding_values.height = static_cast<int16_t>(paddingTop);
140 _op_params.padding_values.width = static_cast<int16_t>(paddingLeft);
141 CalculateActivationRange<float>(activation, &_op_params.float_activation_min,
143 }
144
145 if (activation != ir::Activation::NONE)
146 {
147 _act_back_prop_output = std::make_unique<Tensor>(_output->get_info());
148 _act_back_prop_output->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
149 }
150 };
151
152 ~AveragePool2D() {}
153
154public:
155 void forward(const IPortableTensor *in, IPortableTensor *out)
156 {
157 auto out_shape = getShape(out);
158 auto out_data = getBuffer<float>(out);
159
160 // avgpool forward
161 nnfw::cker::AveragePool<float>(_op_params, getShape(in), getBuffer<float>(in), out_shape,
162 out_data);
163 }
164
165 void backward(const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
166 {
167 // activation backward
168 try
169 {
170 back_prop_out =
171 backpropActivation(_activation, _output, back_prop_out, _act_back_prop_output.get());
172 }
173 catch (const std::exception &e)
174 {
175 throw std::runtime_error{"train PoolLayer: " + std::string(e.what())};
176 }
177 assert(back_prop_out != nullptr);
178
179 // averagepool baackward
180 nnfw::cker::train::AveragePool2DGrad(_op_params, getShape(back_prop_out),
181 getBuffer<float>(back_prop_out), getShape(back_prop_in),
182 getBuffer<float>(back_prop_in));
183 }
184};
185
} // namespace

189 : cpu::ops::PoolLayer(), _back_prop_input(nullptr), _back_prop_output(nullptr), _kernel(nullptr)
190{
191 // DO NOTHING
192}
193
194void PoolLayer::configureBackward(const uint32_t paddingLeft, const uint32_t paddingRight,
195 const uint32_t paddingTop, const uint32_t paddingBottom,
196 const uint32_t strideWidth, const uint32_t strideHeight,
197 const uint32_t kernelWidth, const uint32_t kernelHeight,
198 const ir::Activation activation, const PoolType op_type,
199 IPortableTensor *output, IPortableTensor *back_prop_input,
200 const IPortableTensor *back_prop_output)
201{
202 _back_prop_output = back_prop_output;
203 _back_prop_input = back_prop_input;
204
205 if (output->data_type() != OperandType::FLOAT32)
206 {
207 throw std::runtime_error("PoolLayer : Unsupported data type for training");
208 }
209
210 // ready training kernel
211 switch (op_type)
212 {
213 case PoolType::kMax:
214 _kernel = std::make_unique<MaxPool2D>(paddingLeft, paddingRight, paddingTop, paddingBottom,
215 strideWidth, strideHeight, kernelWidth, kernelHeight,
216 activation, output);
217 break;
218 case PoolType::kAvg:
219 _kernel = std::make_unique<AveragePool2D>(paddingLeft, paddingRight, paddingTop,
220 paddingBottom, strideWidth, strideHeight,
221 kernelWidth, kernelHeight, activation, output);
222 break;
223 default:
224 throw std::runtime_error("PoolLayer: Unsupported pool type");
225 }
226}
227
228void PoolLayer::forward(bool training)
229{
230 if (training)
231 {
232 _kernel->forward(_input, _output);
233 }
234 else
235 {
237 }
238}
239
240void PoolLayer::backward() { _kernel->backward(_back_prop_output, _back_prop_input); }
241
} // namespace ops
} // namespace train
} // namespace backend
} // namespace onert
A tensor class that is portable for other backends.
const IPortableTensor * _input
Definition PoolLayer.h:57
void configureBackward(const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, const uint32_t kernelHeight, const ir::Activation activation, const PoolType op_type, IPortableTensor *output, IPortableTensor *back_prop_input, const IPortableTensor *back_prop_output)
Definition PoolLayer.cc:194
void forward(bool training) override
Definition PoolLayer.cc:228
nnfw::cker::BinaryArithmeticOpParam _op_params
void AveragePool2DGrad(const PoolParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data)
Definition AveragePool.h:33
void MaxPool2D(const PoolParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data, int *arg_max_index)
Definition MaxPool.h:36
void MaxPool2DGrad(const Shape &incoming_shape, const float *incoming_data, const int *arg_max_index, const Shape &grad_shape, float *grad_data)
Definition MaxPool.h:129
void AveragePool< float >(const PoolParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
Definition AveragePool.h:44
const IPortableTensor * backpropActivation(const ir::Activation &activation, const IPortableTensor *output, const IPortableTensor *input_backprop, IPortableTensor *output_backprop)
backpropagate activation
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
Get shape of tensor.