ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::cpu::ops::PoolLayer Class Reference

#include <Pool2DLayer.h>

Collaboration diagram for onert::backend::cpu::ops::PoolLayer:

Public Member Functions

 PoolLayer ()
 
void configure (const IPortableTensor *input, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, const uint32_t kernelHeight, const ir::Activation activation, IPortableTensor *output, const PoolType op_type)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Protected Attributes

const IPortableTensor * _input
 
IPortableTensor * _output
 

Detailed Description

Definition at line 35 of file Pool2DLayer.h.

Constructor & Destructor Documentation

◆ PoolLayer()

onert::backend::cpu::ops::PoolLayer::PoolLayer ( )

Definition at line 113 of file Pool2DLayer.cc.

113 : _input(nullptr), _output(nullptr), _kernel()
114{
115 // DO NOTHING
116}
const IPortableTensor * _input
Definition Pool2DLayer.h:51

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::PoolLayer::configure ( const IPortableTensor * input,
const uint32_t  paddingLeft,
const uint32_t  paddingRight,
const uint32_t  paddingTop,
const uint32_t  paddingBottom,
const uint32_t  strideWidth,
const uint32_t  strideHeight,
const uint32_t  kernelWidth,
const uint32_t  kernelHeight,
const ir::Activation  activation,
IPortableTensor * output,
const PoolType  op_type 
)

Definition at line 131 of file Pool2DLayer.cc.

136{
137 assert(input != nullptr);
138 assert(output != nullptr);
139
140 _input = input;
141 _output = output;
142
144
145 switch (_input->data_type())
146 {
147 case OperandType::FLOAT32:
148 {
149 float output_activation_min = 0;
150 float output_activation_max = 0;
151 CalculateActivationRange<float>(activation, &output_activation_min, &output_activation_max);
152 op_params.float_activation_min = output_activation_min;
153 op_params.float_activation_max = output_activation_max;
154
155 _kernel = generateKernelGeneric<float>(op_params, op_type);
156 break;
157 }
158 case OperandType::QUANT_UINT8_ASYMM:
159 {
160 int32_t output_activation_min = 0;
161 int32_t output_activation_max = 0;
162 CalculateActivationRangeQuantized(activation, _output, &output_activation_min,
163 &output_activation_max);
164 op_params.quantized_activation_min = output_activation_min;
165 op_params.quantized_activation_max = output_activation_max;
166 _kernel = generateKernelGeneric<uint8_t>(op_params, op_type);
167 break;
168 }
169 case OperandType::QUANT_INT8_ASYMM:
170 {
171 int32_t output_activation_min = 0;
172 int32_t output_activation_max = 0;
173 CalculateActivationRangeQuantized(activation, _output, &output_activation_min,
174 &output_activation_max);
175 op_params.quantized_activation_min = output_activation_min;
176 op_params.quantized_activation_max = output_activation_max;
177 _kernel = generateKernelGeneric<int8_t>(op_params, op_type);
178 break;
179 }
180 default:
181 throw std::runtime_error{"Pool: unsupported data type"};
182 }
183}
#define POOLING_PARAMETERS
ir::DataType data_type() const override final
void CalculateActivationRangeQuantized(ir::Activation activation, const IPortableTensor *output, int32_t *act_min, int32_t *act_max)

References _input, _output, onert::backend::cpu::ops::CalculateActivationRangeQuantized(), onert::backend::IPortableTensor::data_type(), and POOLING_PARAMETERS.

◆ run()

void onert::backend::cpu::ops::PoolLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 185 of file Pool2DLayer.cc.

185{ _kernel(_input, _output); }

References _input, and _output.

Referenced by onert::backend::train::ops::PoolLayer::forward().

Field Documentation

◆ _input

const IPortableTensor* onert::backend::cpu::ops::PoolLayer::_input
protected

Definition at line 51 of file Pool2DLayer.h.

Referenced by configure(), onert::backend::train::ops::PoolLayer::forward(), and run().

◆ _output

IPortableTensor* onert::backend::cpu::ops::PoolLayer::_output
protected

Definition at line 52 of file Pool2DLayer.h.

Referenced by configure(), onert::backend::train::ops::PoolLayer::forward(), and run().


The documentation for this class was generated from the following files: