// NOTE(review): This is a fragmentary extraction — original source line
// numbers (39, 43, 46, ...) are fused into the text and many interior
// lines are missing. Code below is kept byte-identical; comments only.
//
// MaxPool2D: training kernel for 2D max pooling. Runs the forward pass
// while recording per-element argmax indices, then reuses those indices
// in the backward pass to route gradients.
39class MaxPool2D final :
public TrainingKernelRegistry
// Forward output tensor; its info/size is used to allocate the two
// auxiliary tensors below.
43 const IPortableTensor *_output;
// Scratch tensor — presumably holds the activation-backprop result
// before pooling-grad; TODO confirm (allocation visible, use is not).
46 std::unique_ptr<Tensor> _act_back_prop_output;
// Argmax indices captured in forward() and consumed in backward().
47 std::unique_ptr<Tensor> _arg_max_index;
// Ctor: unnamed params are paddingRight / paddingBottom in the caller
// (see the make_unique<MaxPool2D> call site) — unused here.
50 MaxPool2D(
const uint32_t paddingLeft,
const uint32_t,
const uint32_t paddingTop,
const uint32_t,
51 const uint32_t strideWidth,
const uint32_t strideHeight,
const uint32_t kernelWidth,
53 const IPortableTensor *output)
54 : _activation(activation), _output(
output)
// Padding is stored as int16_t below, so it must fit in 15 bits.
61 assert(paddingTop < (1 << 15));
62 assert(paddingLeft < (1 << 15));
63 _op_params.padding_values.height =
static_cast<int16_t
>(paddingTop);
64 _op_params.padding_values.width =
static_cast<int16_t
>(paddingLeft);
// Allocate argmax-index tensor with the same shape/size as the output.
// NOTE(review): buffer sized by total_size() (bytes of output) — assumes
// sizeof(int) == sizeof(float) element size; TODO confirm.
69 _arg_max_index = std::make_unique<Tensor>(_output->get_info());
70 _arg_max_index->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
// Allocate activation-backprop scratch tensor, same shape as output.
74 _act_back_prop_output = std::make_unique<Tensor>(_output->get_info());
75 _act_back_prop_output->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
// Forward: pools `in` into `out`, writing argmax indices as a side
// effect (int buffer passed as the last argument).
82 void forward(
const IPortableTensor *in, IPortableTensor *out)
85 auto out_data = getBuffer<float>(out);
86 auto arg_max_index = _arg_max_index.get();
90 out_data, getBuffer<int>(arg_max_index));
// Backward: routes output gradients back through the recorded argmax
// indices into back_prop_in. Exceptions from the activation-backprop
// step are wrapped with a "train PoolLayer: " prefix.
93 void backward(
const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
101 catch (
const std::exception &e)
103 throw std::runtime_error{
"train PoolLayer: " + std::string(e.what())};
105 assert(back_prop_out !=
nullptr);
108 auto arg_max_index = _arg_max_index.get();
110 getBuffer<int>(arg_max_index),
getShape(back_prop_in),
111 getBuffer<float>(back_prop_in));
// NOTE(review): Fragmentary extraction, same caveats as the MaxPool2D
// block — original line numbers fused into the text, interior lines
// missing. Code kept byte-identical; comments only.
//
// AveragePool2D: training kernel for 2D average pooling. Unlike max
// pooling, no index tensor is needed for backward — the gradient is
// spread uniformly (see AveragePool2DGrad below, which takes only the
// incoming gradient).
115class AveragePool2D final :
public TrainingKernelRegistry
// Forward output tensor; used to size the scratch tensor below.
119 const IPortableTensor *_output;
// Scratch tensor for the activation-backprop result; TODO confirm use
// (allocation is visible in this fragment, the consuming call is not).
122 std::unique_ptr<Tensor> _act_back_prop_output;
// NOTE(review): declared but never allocated or used anywhere in this
// fragment — looks like dead state copied from MaxPool2D; verify.
123 std::unique_ptr<Tensor> _arg_avg_index;
// Ctor: the unnamed params are paddingRight / paddingBottom at the
// call site — unused here.
126 AveragePool2D(
const uint32_t paddingLeft,
const uint32_t,
const uint32_t paddingTop,
127 const uint32_t,
const uint32_t strideWidth,
const uint32_t strideHeight,
128 const uint32_t kernelWidth,
const uint32_t kernelHeight,
130 : _activation(activation), _output(
output)
// Padding is narrowed to int16_t below, hence the 15-bit bound.
137 assert(paddingTop < (1 << 15));
138 assert(paddingLeft < (1 << 15));
139 _op_params.padding_values.height =
static_cast<int16_t
>(paddingTop);
140 _op_params.padding_values.width =
static_cast<int16_t
>(paddingLeft);
// Allocate the activation-backprop scratch, same shape/size as output.
147 _act_back_prop_output = std::make_unique<Tensor>(_output->get_info());
148 _act_back_prop_output->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
// Forward: average-pools `in` into `out` (no index side output).
155 void forward(
const IPortableTensor *in, IPortableTensor *out)
158 auto out_data = getBuffer<float>(out);
// Backward: distributes the output gradient into back_prop_in.
// Activation-backprop exceptions are wrapped with a "train PoolLayer: "
// prefix, same convention as MaxPool2D::backward.
165 void backward(
const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
173 catch (
const std::exception &e)
175 throw std::runtime_error{
"train PoolLayer: " + std::string(e.what())};
177 assert(back_prop_out !=
nullptr);
181 getBuffer<float>(back_prop_out),
getShape(back_prop_in),
182 getBuffer<float>(back_prop_in));
// NOTE(review): Fragmentary extraction — code kept byte-identical,
// comments only.
//
// PoolLayer ctor: delegates to the non-training cpu::ops::PoolLayer and
// null-initializes the training-only members.
189 : cpu::
ops::
PoolLayer(), _back_prop_input(nullptr), _back_prop_output(nullptr), _kernel(nullptr)
// configureBackward (fragment): stores the backprop tensors, rejects
// non-float outputs, then builds the pool-type-specific kernel.
195 const uint32_t paddingTop,
const uint32_t paddingBottom,
196 const uint32_t strideWidth,
const uint32_t strideHeight,
197 const uint32_t kernelWidth,
const uint32_t kernelHeight,
202 _back_prop_output = back_prop_output;
203 _back_prop_input = back_prop_input;
// Training path only supports FLOAT32 outputs.
205 if (output->data_type() != OperandType::FLOAT32)
207 throw std::runtime_error(
"PoolLayer : Unsupported data type for training");
// Dispatch on pool type: MAX -> MaxPool2D kernel ...
214 _kernel = std::make_unique<MaxPool2D>(paddingLeft, paddingRight, paddingTop, paddingBottom,
215 strideWidth, strideHeight, kernelWidth, kernelHeight,
// ... AVG -> AveragePool2D kernel ...
219 _kernel = std::make_unique<AveragePool2D>(paddingLeft, paddingRight, paddingTop,
220 paddingBottom, strideWidth, strideHeight,
221 kernelWidth, kernelHeight, activation, output);
// ... anything else is unsupported.
224 throw std::runtime_error(
"PoolLayer: Unsupported pool type");
A tensor class that is portable for other backends.
const IPortableTensor * _input
IPortableTensor * _output
void configureBackward(const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, const uint32_t kernelHeight, const ir::Activation activation, const PoolType op_type, IPortableTensor *output, IPortableTensor *back_prop_input, const IPortableTensor *back_prop_output)
void forward(bool training) override
nnfw::cker::BinaryArithmeticOpParam _op_params
void AveragePool2DGrad(const PoolParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data)
void MaxPool2D(const PoolParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data, int *arg_max_index)
void MaxPool2DGrad(const Shape &incoming_shape, const float *incoming_data, const int *arg_max_index, const Shape &grad_shape, float *grad_data)
void AveragePool< float >(const PoolParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
const IPortableTensor * backpropActivation(const ir::Activation &activation, const IPortableTensor *output, const IPortableTensor *input_backprop, IPortableTensor *output_backprop)
Backpropagate activation.
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
Get shape of tensor.
float float_activation_max
float float_activation_min