ONE - On-device Neural Engine
onert::backend::cpu::ops::SoftMaxLayer Class Reference

#include <SoftMaxLayer.h>

Public Member Functions

 SoftMaxLayer ()
 
void softmaxFloat32 ()
 
template<typename T >
void softmaxQuant8 ()
 
void configure (const IPortableTensor *input, const float beta, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Protected Attributes

const IPortableTensor * _input
 
IPortableTensor * _output
 

Detailed Description

Definition at line 27 of file SoftMaxLayer.h.

Constructor & Destructor Documentation

◆ SoftMaxLayer()

onert::backend::cpu::ops::SoftMaxLayer::SoftMaxLayer ( )

Definition at line 26 of file SoftMaxLayer.cc.

SoftMaxLayer::SoftMaxLayer() : _input(nullptr), _output(nullptr), _beta(0.0)
{
  // DO NOTHING
}

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::SoftMaxLayer::configure ( const IPortableTensor *  input,
  const float  beta,
  IPortableTensor *  output 
)

Definition at line 82 of file SoftMaxLayer.cc.

{
  _input = input;
  _output = output;
  _beta = beta;

  if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM ||
      _input->data_type() == OperandType::QUANT_INT8_ASYMM)
  {
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
    // Only apply when both input & output are uint8/int8 & build with clang
    // on aarch64.
    nnfw::cker::PopulateSoftmaxUInt8LookupTable(_uint8_table1, _uint8_table2, _input->data_scale(),
                                                _beta);
#else
    nnfw::cker::PopulateSoftmaxLookupTable(_table, _input->data_scale(), _beta);
#endif
  }
}

References _input, _output, onert::backend::IPortableTensor::data_scale(), onert::backend::IPortableTensor::data_type(), and nnfw::cker::PopulateSoftmaxLookupTable().
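
A minimal usage sketch (not taken from the source tree): configure() binds the input/output tensors and the beta scaling factor, and pre-computes the softmax lookup tables when the input is quantized; run() can then be called once per inference. The tensor objects are assumed to be created and allocated elsewhere by the cpu backend, so only the layer wiring is shown.

#include <memory>

#include "SoftMaxLayer.h" // include path within the onert cpu backend may differ

using onert::backend::IPortableTensor;
using onert::backend::cpu::ops::SoftMaxLayer;

// `input` and `output` are assumed to be tensors of identical shape with a
// data type this layer supports (FLOAT32, QUANT_UINT8_ASYMM or QUANT_INT8_ASYMM).
std::unique_ptr<SoftMaxLayer> buildSoftmax(const IPortableTensor *input, IPortableTensor *output)
{
  auto layer = std::make_unique<SoftMaxLayer>();
  layer->configure(input, /*beta=*/1.0f, output);
  return layer;
}

// During execution the layer is invoked like any other IFunction:
//   layer->run(); // dispatches to softmaxFloat32() or softmaxQuant8<T>() by data type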

◆ run()

void onert::backend::cpu::ops::SoftMaxLayer::run ( )
override virtual

Implements onert::exec::IFunction.

Definition at line 103 of file SoftMaxLayer.cc.

{
  switch (_input->data_type())
  {
    case OperandType::FLOAT32:
      softmaxFloat32();
      break;
    case OperandType::QUANT_UINT8_ASYMM:
      softmaxQuant8<uint8_t>();
      break;
    case OperandType::QUANT_INT8_ASYMM:
      softmaxQuant8<int8_t>();
      break;
    default:
      throw std::runtime_error{"SoftMax: unsupported data type"};
  }
}

References _input, onert::backend::IPortableTensor::data_type(), and softmaxFloat32().

Referenced by onert::backend::train::ops::SoftMaxLayer::forward().

◆ softmaxFloat32()

void onert::backend::cpu::ops::SoftMaxLayer::softmaxFloat32 ( )

Definition at line 31 of file SoftMaxLayer.cc.

{
  if (getNumberOfDimensions(_input) == 1)
  {
    uint32_t input_size = getNumberOfElements(_input);
    nnfw::cker::Softmax(getBuffer<float>(_input), input_size, 1, _beta, getBuffer<float>(_output));
  }
  else if (getNumberOfDimensions(_input) == 2)
  {
    uint32_t batch_size = getSizeOfDimension(_input, 0);
    if (batch_size == 0)
      throw std::runtime_error("batch_size should not be 0");

    uint32_t input_size = getNumberOfElements(_input) / batch_size;
    nnfw::cker::Softmax(getBuffer<float>(_input), input_size, batch_size, _beta,
                        getBuffer<float>(_output));
  }
  else if (getNumberOfDimensions(_input) == 4)
  {
    nnfw::cker::SoftmaxParams op_params;
    op_params.beta = _beta;
    nnfw::cker::Softmax(op_params, getShape(_input), getBuffer<float>(_input), getShape(_output),
                        getBuffer<float>(_output));
  }
  else
  {
    nnfw::cker::SoftmaxParams op_params;
    op_params.beta = _beta;
    nnfw::cker::reference::Softmax(op_params, getShape(_input), getBuffer<float>(_input),
                                   getShape(_output), getBuffer<float>(_output));
  }
}

References _input, _output, nnfw::cker::SoftmaxParams::beta, getNumberOfDimensions(), getNumberOfElements(), onert::backend::cpu::ops::getShape(), getSizeOfDimension(), nnfw::cker::Softmax(), and nnfw::cker::reference::Softmax().

Referenced by run().
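
For reference, the float path computes the usual softmax with beta acting as a scale on the logits; in LaTeX (the batch/axis handling follows the cker kernels called above):

\mathrm{softmax}(x)_i = \frac{e^{\beta x_i}}{\sum_{j} e^{\beta x_j}}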

◆ softmaxQuant8()

template<typename T >
void onert::backend::cpu::ops::SoftMaxLayer::softmaxQuant8 ( )

Definition at line 64 of file SoftMaxLayer.cc.

{
  nnfw::cker::SoftmaxParams op_params;
  op_params.scale = _output->data_scale();
  op_params.zero_point = _output->data_zero_point();
  op_params.uint8_table1 = _uint8_table1;
  op_params.uint8_table2 = _uint8_table2;
  op_params.table = _table;

#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
  nnfw::cker::SoftmaxInt8LUT<T, T>(op_params, getShape(_input), getBuffer<T>(_input),
                                   getShape(_output), getBuffer<T>(_output));
#else
  nnfw::cker::Softmax<T, T>(op_params, getShape(_input), getBuffer<T>(_input), getShape(_output),
                            getBuffer<T>(_output));
#endif
}

References _input, _output, onert::backend::IPortableTensor::data_scale(), onert::backend::IPortableTensor::data_zero_point(), onert::backend::cpu::ops::getShape(), nnfw::cker::SoftmaxParams::scale, nnfw::cker::SoftmaxParams::table, nnfw::cker::SoftmaxParams::uint8_table1, nnfw::cker::SoftmaxParams::uint8_table2, and nnfw::cker::SoftmaxParams::zero_point.
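
The quantized paths assume the standard asymmetric (affine) quantization scheme, which is why the output tensor's scale and zero point are copied into op_params: a real value x is represented by a quantized value q with scale s = data_scale() and zero point z = data_zero_point(), i.e. (in LaTeX)

x \approx s\,(q - z)

so the kernels can write q directly in the output tensor's quantization domain.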

Field Documentation

◆ _input

const IPortableTensor* onert::backend::cpu::ops::SoftMaxLayer::_input
protected

◆ _output

IPortableTensor* onert::backend::cpu::ops::SoftMaxLayer::_output
protected

The documentation for this class was generated from the following files:

SoftMaxLayer.h
SoftMaxLayer.cc