ONE - On-device Neural Engine
onert::backend::cpu::ops::SoftMaxLayer Class Reference

#include <SoftMaxLayer.h>

Collaboration diagram for onert::backend::cpu::ops::SoftMaxLayer:

Public Member Functions

 SoftMaxLayer ()
 
void softmaxFloat32 ()
 
template<typename T >
void softmaxQuant8 ()
 
void configure (const IPortableTensor *input, const float beta, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Protected Attributes

const IPortableTensor * _input
 
IPortableTensor * _output
 

Detailed Description

Definition at line 33 of file SoftMaxLayer.h.
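
As context for the members below: this layer applies a softmax activation in which the logits are scaled by the beta value passed to configure(). A sketch of the computation, following the usual TensorFlow Lite softmax convention (the exact behaviour is whatever nnfw::cker::Softmax implements):

\[ \mathrm{softmax}(x)_i = \frac{e^{\beta x_i}}{\sum_j e^{\beta x_j}} \]

where the sum runs over the innermost (class) dimension of the input.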

Constructor & Destructor Documentation

◆ SoftMaxLayer()

onert::backend::cpu::ops::SoftMaxLayer::SoftMaxLayer ( )

Definition at line 32 of file SoftMaxLayer.cc.

SoftMaxLayer::SoftMaxLayer() : _input(nullptr), _output(nullptr), _beta(0.0)
{
  // DO NOTHING
}

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::SoftMaxLayer::configure (const IPortableTensor *input,
                                                        const float beta,
                                                        IPortableTensor *output)

Definition at line 88 of file SoftMaxLayer.cc.

void SoftMaxLayer::configure(const IPortableTensor *input, const float beta,
                             IPortableTensor *output)
{
  _input = input;
  _output = output;
  _beta = beta;

  if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM ||
      _input->data_type() == OperandType::QUANT_INT8_ASYMM)
  {
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
    // Only apply when both input & output are uint8/int8 & build with clang
    // on aarch64.
    nnfw::cker::PopulateSoftmaxUInt8LookupTable(_uint8_table1, _uint8_table2, _input->data_scale(),
                                                _beta);
#else
    nnfw::cker::PopulateSoftmaxLookupTable(_table, _input->data_scale(), _beta);
#endif
  }
}

References _input, _output, onert::backend::IPortableTensor::data_scale(), onert::backend::IPortableTensor::data_type(), and nnfw::cker::PopulateSoftmaxLookupTable().
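
For quantized inputs, configure() precomputes exponential lookup tables so that run() can avoid per-element calls to expf(). The self-contained sketch below shows the idea behind such a table; it is modeled on the TensorFlow Lite reference helper, and the indexing used by nnfw::cker::PopulateSoftmaxLookupTable may differ, so treat it as an illustration rather than the library's implementation.

#include <cmath>
#include <cstdint>
#include <limits>

// Sketch: cache exp(-input_scale * beta * diff) for every possible quantized
// difference diff = max_value - value, so the hot loop can replace expf()
// with a table lookup (256 entries for 8-bit data).
void populateSoftmaxTableSketch(float *table, float input_scale, float beta)
{
  const float scale = -input_scale * beta;
  const int32_t max_uint8 = std::numeric_limits<uint8_t>::max();
  for (int32_t val = 0; val <= max_uint8; ++val)
    table[max_uint8 - val] = std::exp(scale * static_cast<float>(val));
}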

◆ run()

void onert::backend::cpu::ops::SoftMaxLayer::run ( )
override virtual

Implements onert::exec::IFunction.

Definition at line 109 of file SoftMaxLayer.cc.

void SoftMaxLayer::run()
{
  switch (_input->data_type())
  {
    case OperandType::FLOAT32:
      softmaxFloat32();
      break;
    case OperandType::QUANT_UINT8_ASYMM:
      softmaxQuant8<uint8_t>();
      break;
    case OperandType::QUANT_INT8_ASYMM:
      softmaxQuant8<int8_t>();
      break;
    default:
      throw std::runtime_error{"SoftMax: unsupported data type"};
  }
}

References _input, onert::backend::IPortableTensor::data_type(), and softmaxFloat32().

Referenced by onert::backend::train::ops::SoftMaxLayer::forward(), and package.infer.session::inference().
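
The expected lifecycle is: construct the layer, call configure() once to bind tensors and the beta parameter, then call run() on every graph execution. The sketch below only illustrates that call order; tensor creation is backend-specific (in onert the tensors come from the CPU backend's tensor registry, not from user code), and the include path is assumed.

#include <memory>
#include "SoftMaxLayer.h" // assumed include path within the CPU backend

using onert::backend::IPortableTensor;
using onert::backend::cpu::ops::SoftMaxLayer;

// Configure once; the caller supplies already-allocated backend tensors.
std::unique_ptr<SoftMaxLayer> makeSoftmax(const IPortableTensor *input, float beta,
                                          IPortableTensor *output)
{
  auto layer = std::make_unique<SoftMaxLayer>();
  layer->configure(input, beta, output); // also precomputes LUTs for quantized data types
  return layer;
}

// Per execution:
//   layer->run(); // dispatches to softmaxFloat32() or softmaxQuant8<T>() by input data type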

◆ softmaxFloat32()

void onert::backend::cpu::ops::SoftMaxLayer::softmaxFloat32 ( )

Definition at line 37 of file SoftMaxLayer.cc.

void SoftMaxLayer::softmaxFloat32()
{
  if (getNumberOfDimensions(_input) == 1)
  {
    uint32_t input_size = getNumberOfElements(_input);
    nnfw::cker::Softmax(getBuffer<float>(_input), input_size, 1, _beta, getBuffer<float>(_output));
  }
  else if (getNumberOfDimensions(_input) == 2)
  {
    uint32_t batch_size = getSizeOfDimension(_input, 0);
    if (batch_size == 0)
      throw std::runtime_error("batch_size should not be 0");

    uint32_t input_size = getNumberOfElements(_input) / batch_size;
    nnfw::cker::Softmax(getBuffer<float>(_input), input_size, batch_size, _beta,
                        getBuffer<float>(_output));
  }
  else if (getNumberOfDimensions(_input) == 4)
  {
    nnfw::cker::SoftmaxParams op_params;
    op_params.beta = _beta;
    nnfw::cker::Softmax(op_params, getShape(_input), getBuffer<float>(_input), getShape(_output),
                        getBuffer<float>(_output));
  }
  else
  {
    nnfw::cker::SoftmaxParams op_params;
    op_params.beta = _beta;
    nnfw::cker::reference::Softmax(op_params, getShape(_input), getBuffer<float>(_input),
                                   getShape(_output), getBuffer<float>(_output));
  }
}

References _input, _output, nnfw::cker::SoftmaxParams::beta, getNumberOfDimensions(), getNumberOfElements(), onert::backend::cpu::ops::getShape(), getSizeOfDimension(), nnfw::cker::Softmax(), and nnfw::cker::reference::Softmax().

Referenced by run().
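
For intuition, the 2-D branch above treats the tensor as [batch_size, input_size] and applies a beta-scaled softmax to each row. A self-contained reference version of that computation, stabilized by subtracting the row maximum, is sketched below; it is illustrative only and is not the nnfw::cker code path.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Reference beta-softmax over the rows of a [batch_size, input_size] buffer.
void softmaxRowsSketch(const float *in, uint32_t batch_size, uint32_t input_size, float beta,
                       float *out)
{
  for (uint32_t b = 0; b < batch_size; ++b)
  {
    const float *row_in = in + b * input_size;
    float *row_out = out + b * input_size;

    // Subtract the row max before exponentiating to avoid overflow.
    const float max_val = *std::max_element(row_in, row_in + input_size);
    float sum = 0.0f;
    for (uint32_t i = 0; i < input_size; ++i)
    {
      row_out[i] = std::exp(beta * (row_in[i] - max_val));
      sum += row_out[i];
    }
    for (uint32_t i = 0; i < input_size; ++i)
      row_out[i] /= sum;
  }
}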

◆ softmaxQuant8()

template<typename T >
void onert::backend::cpu::ops::SoftMaxLayer::softmaxQuant8 ( )

Definition at line 70 of file SoftMaxLayer.cc.

template <typename T> void SoftMaxLayer::softmaxQuant8()
{
  nnfw::cker::SoftmaxParams op_params;
  op_params.scale = _output->data_scale();
  op_params.zero_point = _output->data_zero_point();
  op_params.uint8_table1 = _uint8_table1;
  op_params.uint8_table2 = _uint8_table2;
  op_params.table = _table;

#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
  nnfw::cker::SoftmaxInt8LUT<T, T>(op_params, getShape(_input), getBuffer<T>(_input),
                                   getShape(_output), getBuffer<T>(_output));
#else
  nnfw::cker::Softmax<T, T>(op_params, getShape(_input), getBuffer<T>(_input), getShape(_output),
                            getBuffer<T>(_output));
#endif
}

References _input, _output, onert::backend::IPortableTensor::data_scale(), onert::backend::IPortableTensor::data_zero_point(), onert::backend::cpu::ops::getShape(), nnfw::cker::SoftmaxParams::scale, nnfw::cker::SoftmaxParams::table, nnfw::cker::SoftmaxParams::uint8_table1, nnfw::cker::SoftmaxParams::uint8_table2, and nnfw::cker::SoftmaxParams::zero_point.
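
Conceptually the quantized path computes the same softmax as the float path: the lookup tables set up above only replace the exponentials, and the result is requantized with the output tensor's data_scale() and data_zero_point(). The sketch below spells out those semantics in plain (non-LUT) form; it is not the optimized nnfw::cker kernel, and the helper name is made up for illustration.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <vector>

// Semantics sketch for one row of quantized softmax (T = uint8_t or int8_t):
// dequantize via the input scale, run a float softmax, then requantize with
// the output scale and zero point.
template <typename T>
void quantSoftmaxRowSketch(const T *in, uint32_t size, float input_scale, float beta,
                           float output_scale, int32_t output_zero_point, T *out)
{
  std::vector<float> probs(size);
  const T max_q = *std::max_element(in, in + size);
  float sum = 0.0f;
  for (uint32_t i = 0; i < size; ++i)
  {
    // exp(beta * input_scale * diff) is exactly what the lookup tables cache per diff.
    probs[i] = std::exp(beta * input_scale * static_cast<float>(in[i] - max_q));
    sum += probs[i];
  }
  for (uint32_t i = 0; i < size; ++i)
  {
    const float requant = probs[i] / (sum * output_scale) + static_cast<float>(output_zero_point);
    const float lo = static_cast<float>(std::numeric_limits<T>::min());
    const float hi = static_cast<float>(std::numeric_limits<T>::max());
    out[i] = static_cast<T>(std::lround(std::min(hi, std::max(lo, requant))));
  }
}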

Field Documentation

◆ _input

const IPortableTensor* onert::backend::cpu::ops::SoftMaxLayer::_input
protected

◆ _output

IPortableTensor* onert::backend::cpu::ops::SoftMaxLayer::_output
protected

The documentation for this class was generated from the following files:

SoftMaxLayer.h
SoftMaxLayer.cc