// NOTE(review): this listing is an extraction with interior lines missing and the
// original source line numbers fused onto some lines (e.g. "33class"). Code below
// is kept byte-identical; only comments are added.
//
// Training kernel for 2-D max pooling.
// forward() runs max pooling and records per-output-element arg-max indices into
// _arg_max_index; backward() uses those indices (via cker::MaxPool2DGrad, see the
// getBuffer<int>(arg_max_index) call below) to route gradients back to the inputs
// that produced each max.
33class MaxPool2D final :
public TrainingKernelRegistry
// Forward-pass output tensor; used below to size the index/backprop scratch buffers.
37 const IPortableTensor *_output;
// Scratch tensor for the gradient after activation backprop (same size as _output).
40 std::unique_ptr<Tensor> _act_back_prop_output;
// Per-output-element arg-max indices captured in forward(), consumed in backward().
41 std::unique_ptr<Tensor> _arg_max_index;
// Constructor. Note the two unnamed uint32_t parameters: right/bottom padding are
// accepted for signature compatibility but unused — only left/top padding are
// stored into _op_params below.
44 MaxPool2D(
const uint32_t paddingLeft,
const uint32_t,
const uint32_t paddingTop,
const uint32_t,
45 const uint32_t strideWidth,
const uint32_t strideHeight,
const uint32_t kernelWidth,
47 const IPortableTensor *output)
48 : _activation(activation), _output(
output)
// Padding is narrowed to int16_t; assert it fits in 15 bits first so the
// static_cast below cannot change the value.
55 assert(paddingTop < (1 << 15));
56 assert(paddingLeft < (1 << 15));
57 _op_params.padding_values.height =
static_cast<int16_t
>(paddingTop);
58 _op_params.padding_values.width =
static_cast<int16_t
>(paddingLeft);
// Allocate the arg-max index tensor with the output's shape/info.
// NOTE(review): the buffer is sized in bytes via total_size() of a float tensor
// but read as int below — presumably relies on sizeof(int) == sizeof(float);
// confirm against the full source.
63 _arg_max_index = std::make_unique<Tensor>(_output->get_info());
64 _arg_max_index->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
// Scratch buffer for activation backprop, same size as the output.
68 _act_back_prop_output = std::make_unique<Tensor>(_output->get_info());
69 _act_back_prop_output->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
// Forward: max pooling that also fills _arg_max_index (int buffer) for backward().
76 void forward(
const IPortableTensor *in, IPortableTensor *out)
79 auto out_data = getBuffer<float>(out);
80 auto arg_max_index = _arg_max_index.get();
84 out_data, getBuffer<int>(arg_max_index));
// Backward: any failure during activation backprop is rethrown as a
// runtime_error tagged with "train PoolLayer:"; gradients are then scattered to
// the positions recorded in _arg_max_index.
87 void backward(
const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
95 catch (
const std::exception &e)
97 throw std::runtime_error{
"train PoolLayer: " + std::string(e.what())};
99 assert(back_prop_out !=
nullptr);
102 auto arg_max_index = _arg_max_index.get();
104 getBuffer<int>(arg_max_index),
getShape(back_prop_in),
105 getBuffer<float>(back_prop_in));
// NOTE(review): extraction with interior lines missing; original line numbers are
// fused onto some lines (e.g. "109class"). Code kept byte-identical; comments only.
//
// Training kernel for 2-D average pooling. Unlike MaxPool2D above, backward()
// feeds the (float) output gradient straight into the grad kernel — average
// pooling needs no recorded indices, each input in a window receives an equal
// share of the gradient.
109class AveragePool2D final :
public TrainingKernelRegistry
// Forward-pass output tensor; used to size the backprop scratch buffer.
113 const IPortableTensor *_output;
// Scratch tensor for the gradient after activation backprop (same size as _output).
116 std::unique_ptr<Tensor> _act_back_prop_output;
// NOTE(review): declared but no use is visible in this listing — possibly dead;
// confirm against the full source before removing.
117 std::unique_ptr<Tensor> _arg_avg_index;
// Constructor. The unnamed uint32_t parameters (right/bottom padding) are unused;
// only left/top padding are stored into _op_params below.
120 AveragePool2D(
const uint32_t paddingLeft,
const uint32_t,
const uint32_t paddingTop,
121 const uint32_t,
const uint32_t strideWidth,
const uint32_t strideHeight,
122 const uint32_t kernelWidth,
const uint32_t kernelHeight,
124 : _activation(activation), _output(
output)
// Padding is narrowed to int16_t; assert it fits in 15 bits first.
131 assert(paddingTop < (1 << 15));
132 assert(paddingLeft < (1 << 15));
133 _op_params.padding_values.height =
static_cast<int16_t
>(paddingTop);
134 _op_params.padding_values.width =
static_cast<int16_t
>(paddingLeft);
// Scratch buffer for activation backprop, same size as the output.
141 _act_back_prop_output = std::make_unique<Tensor>(_output->get_info());
142 _act_back_prop_output->setBuffer(std::make_shared<basic::Allocator>(_output->total_size()));
// Forward: plain average pooling into out (no auxiliary index buffer).
149 void forward(
const IPortableTensor *in, IPortableTensor *out)
152 auto out_data = getBuffer<float>(out);
// Backward: failures in activation backprop are rethrown tagged with
// "train PoolLayer:", then the output gradient is distributed into back_prop_in.
159 void backward(
const IPortableTensor *back_prop_out, IPortableTensor *back_prop_in)
167 catch (
const std::exception &e)
169 throw std::runtime_error{
"train PoolLayer: " + std::string(e.what())};
171 assert(back_prop_out !=
nullptr);
175 getBuffer<float>(back_prop_out),
getShape(back_prop_in),
176 getBuffer<float>(back_prop_in));
// PoolLayer constructor initializer list (the signature line is not visible in
// this extraction). Delegates to the base cpu::ops::PoolLayer and null-initializes
// the training-specific members; the kernel is selected later in
// configureBackward() based on the pool type.
183 : cpu::
ops::
PoolLayer(), _back_prop_input(nullptr), _back_prop_output(nullptr), _kernel(nullptr)
// configureBackward fragment (the opening signature lines are missing from this
// extraction; per the declaration elsewhere in the listing it also takes
// paddingLeft/paddingRight, activation, op_type, output and the back-prop
// tensors). Validates the output dtype and instantiates the matching training
// kernel; an unrecognized pool type throws.
189 const uint32_t paddingTop,
const uint32_t paddingBottom,
190 const uint32_t strideWidth,
const uint32_t strideHeight,
191 const uint32_t kernelWidth,
const uint32_t kernelHeight,
// Stash the gradient tensors used by forward(training)/backward.
196 _back_prop_output = back_prop_output;
197 _back_prop_input = back_prop_input;
// Training path supports only FLOAT32 outputs.
199 if (output->data_type() != OperandType::FLOAT32)
201 throw std::runtime_error(
"PoolLayer : Unsupported data type for training");
// Pool-type dispatch: build the concrete kernel (MaxPool2D or AveragePool2D).
208 _kernel = std::make_unique<MaxPool2D>(paddingLeft, paddingRight, paddingTop, paddingBottom,
209 strideWidth, strideHeight, kernelWidth, kernelHeight,
213 _kernel = std::make_unique<AveragePool2D>(paddingLeft, paddingRight, paddingTop,
214 paddingBottom, strideWidth, strideHeight,
215 kernelWidth, kernelHeight, activation, output);
// Fallthrough: pool type not handled above.
218 throw std::runtime_error(
"PoolLayer: Unsupported pool type");
A tensor class that is portable for other backends.
const IPortableTensor * _input
IPortableTensor * _output
void configureBackward(const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, const uint32_t kernelHeight, const ir::Activation activation, const PoolType op_type, IPortableTensor *output, IPortableTensor *back_prop_input, const IPortableTensor *back_prop_output)
void forward(bool training) override
nnfw::cker::BinaryArithmeticOpParam _op_params
void MaxPool2D(const mir::TensorVariant &input, const mir::ops::MaxPool2DOp &op, mir::TensorVariant &result)
void AveragePool2DGrad(const PoolParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data)
void MaxPool2D(const PoolParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data, int *arg_max_index)
void MaxPool2DGrad(const Shape &incoming_shape, const float *incoming_data, const int *arg_max_index, const Shape &grad_shape, float *grad_data)
void AveragePool< float >(const PoolParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
const IPortableTensor * backpropActivation(const ir::Activation &activation, const IPortableTensor *output, const IPortableTensor *input_backprop, IPortableTensor *output_backprop)
backpropagate activation
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
Get shape of tensor.
float float_activation_max
float float_activation_min