ONE - On-device Neural Engine
onert::backend::cpu::ops::PoolLayer Class Reference

#include <PoolLayer.h>

Collaboration diagram for onert::backend::cpu::ops::PoolLayer:

Public Member Functions

 PoolLayer ()
 
void configure (const IPortableTensor *input, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, const uint32_t kernelHeight, const ir::Activation activation, IPortableTensor *output, const PoolType op_type)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Protected Attributes

const IPortableTensor * _input
 
IPortableTensor * _output
 

Detailed Description

Definition at line 41 of file PoolLayer.h.
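
PoolLayer implements onert::exec::IFunction, so a configured instance is normally stored and invoked through that interface. Below is a minimal sketch of that pattern; the executor loop, the runAll name, and the include path are assumptions for illustration, not part of this page.

#include <memory>
#include <vector>

#include "exec/IFunction.h" // header path assumed

// Hypothetical executor loop over already-configured functions
// (PoolLayer instances among them).
void runAll(const std::vector<std::unique_ptr<onert::exec::IFunction>> &functions)
{
  for (const auto &fn : functions)
  {
    fn->prepare(); // virtual hook inherited from IFunction (non-pure, so a default exists)
    fn->run();     // dispatches to e.g. PoolLayer::run()
  }
}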

Constructor & Destructor Documentation

◆ PoolLayer()

onert::backend::cpu::ops::PoolLayer::PoolLayer ( )

Definition at line 70 of file PoolLayer.cc.

PoolLayer::PoolLayer() : _input(nullptr), _output(nullptr), _kernel()
{
  // DO NOTHING
}

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::PoolLayer::configure ( const IPortableTensor *  input,
const uint32_t  paddingLeft,
const uint32_t  paddingRight,
const uint32_t  paddingTop,
const uint32_t  paddingBottom,
const uint32_t  strideWidth,
const uint32_t  strideHeight,
const uint32_t  kernelWidth,
const uint32_t  kernelHeight,
const ir::Activation  activation,
IPortableTensor *  output,
const PoolType  op_type 
)

Definition at line 88 of file PoolLayer.cc.

{
  assert(input != nullptr);
  assert(output != nullptr);

  _input = input;
  _output = output;

  POOLING_PARAMETERS

  switch (_input->data_type())
  {
    case OperandType::FLOAT32:
    {
      float output_activation_min = 0;
      float output_activation_max = 0;
      CalculateActivationRange<float>(activation, &output_activation_min, &output_activation_max);
      op_params.float_activation_min = output_activation_min;
      op_params.float_activation_max = output_activation_max;

      _kernel = generateKernelGeneric<float>(op_params, op_type);
      break;
    }
    case OperandType::QUANT_UINT8_ASYMM:
    {
      int32_t output_activation_min = 0;
      int32_t output_activation_max = 0;
      CalculateActivationRangeQuantized(activation, _output, &output_activation_min,
                                        &output_activation_max);
      op_params.quantized_activation_min = output_activation_min;
      op_params.quantized_activation_max = output_activation_max;
      _kernel = generateKernelGeneric<uint8_t>(op_params, op_type);
      break;
    }
    case OperandType::QUANT_INT8_ASYMM:
    {
      int32_t output_activation_min = 0;
      int32_t output_activation_max = 0;
      CalculateActivationRangeQuantized(activation, _output, &output_activation_min,
                                        &output_activation_max);
      op_params.quantized_activation_min = output_activation_min;
      op_params.quantized_activation_max = output_activation_max;
      _kernel = generateKernelGeneric<int8_t>(op_params, op_type);
      break;
    }
    default:
      throw std::runtime_error{"Pool: unsupported data type"};
  }
}

References _input, _output, onert::backend::cpu::ops::CalculateActivationRangeQuantized(), onert::backend::IPortableTensor::data_type(), and POOLING_PARAMETERS.
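
A hedged usage sketch of configure() followed later by run(), here wiring up a 2x2 max pool with stride 2 and no padding. The helper name makeMaxPool2x2 is hypothetical, the caller supplies the tensors, and PoolType::kMax and ir::Activation::NONE are assumed enumerator spellings; verify them against OperationUtils.h and the ir headers in your source tree.

#include <memory>

#include "PoolLayer.h" // as included at the top of this page

// Hypothetical helper: builds a PoolLayer for a 2x2, stride-2 max pool.
std::unique_ptr<onert::exec::IFunction>
makeMaxPool2x2(const onert::backend::IPortableTensor *input,
               onert::backend::IPortableTensor *output)
{
  using namespace onert::backend::cpu::ops;

  auto layer = std::make_unique<PoolLayer>();
  layer->configure(input,
                   /*paddingLeft=*/0, /*paddingRight=*/0,
                   /*paddingTop=*/0, /*paddingBottom=*/0,
                   /*strideWidth=*/2, /*strideHeight=*/2,
                   /*kernelWidth=*/2, /*kernelHeight=*/2,
                   onert::ir::Activation::NONE, // assumed enumerator
                   output,
                   PoolType::kMax);             // assumed enumerator
  return layer; // invoke later through IFunction::run()
}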

◆ run()

void onert::backend::cpu::ops::PoolLayer::run ( )
override virtual

Implements onert::exec::IFunction.

Definition at line 142 of file PoolLayer.cc.

{ _kernel(_input, _output); }

References _input, and _output.

Referenced by onert::backend::train::ops::PoolLayer::forward(), and package.infer.session::inference().
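
run() does no work of its own: it forwards to the functor that configure() selected for the tensor's data type (the _kernel member initialized in the constructor above). The following standalone sketch illustrates that configure-once, run-many pattern with simplified stand-in types; the real _kernel type is not shown on this page, so std::function is an assumption.

#include <functional>
#include <iostream>

// Simplified stand-in for IPortableTensor; illustration only.
struct Tensor
{
};

class KernelHolder
{
public:
  // configure() chooses and binds the kernel once, capturing its parameters.
  void configure(int kernel_w, int kernel_h)
  {
    _kernel = [kernel_w, kernel_h](const Tensor *, Tensor *) {
      std::cout << "pooling with a " << kernel_w << "x" << kernel_h << " window\n";
    };
  }

  // run() only invokes the stored callable, mirroring PoolLayer::run().
  void run(const Tensor *in, Tensor *out) { _kernel(in, out); }

private:
  std::function<void(const Tensor *, Tensor *)> _kernel;
};

int main()
{
  KernelHolder layer;
  Tensor in, out;
  layer.configure(2, 2);
  layer.run(&in, &out); // prints: pooling with a 2x2 window
}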

Field Documentation

◆ _input

const IPortableTensor* onert::backend::cpu::ops::PoolLayer::_input
protected

Definition at line 57 of file PoolLayer.h.

Referenced by configure(), onert::backend::train::ops::PoolLayer::forward(), and run().

◆ _output

IPortableTensor* onert::backend::cpu::ops::PoolLayer::_output
protected

Definition at line 58 of file PoolLayer.h.

Referenced by configure(), onert::backend::train::ops::PoolLayer::forward(), and run().


The documentation for this class was generated from the following files:

PoolLayer.h
PoolLayer.cc