ONE - On-device Neural Engine
SoftMaxLayer.cc
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "SoftMaxLayer.h"

#include "OperationUtils.h"

#include <cker/operation/SoftMax.h>

namespace onert::backend::cpu::ops
{

SoftMaxLayer::SoftMaxLayer() : _input(nullptr), _output(nullptr), _beta(0.0)
{
  // DO NOTHING
}

// Float32 softmax: pick a cker kernel based on the input rank
// (1-D, 2-D batched, optimized 4-D, or the generic reference kernel).
void SoftMaxLayer::softmaxFloat32()
{
  if (getNumberOfDimensions(_input) == 1)
  {
    uint32_t input_size = getNumberOfElements(_input);
    nnfw::cker::Softmax(getBuffer<float>(_input), input_size, 1, _beta, getBuffer<float>(_output));
  }
  else if (getNumberOfDimensions(_input) == 2)
  {
    uint32_t batch_size = getSizeOfDimension(_input, 0);
    if (batch_size == 0)
      throw std::runtime_error("batch_size should not be 0");

    uint32_t input_size = getNumberOfElements(_input) / batch_size;
    nnfw::cker::Softmax(getBuffer<float>(_input), input_size, batch_size, _beta,
                        getBuffer<float>(_output));
  }
  else if (getNumberOfDimensions(_input) == 4)
  {
    nnfw::cker::SoftmaxParams op_params;
    op_params.beta = _beta;
    nnfw::cker::Softmax(op_params, getShape(_input), getBuffer<float>(_input), getShape(_output),
                        getBuffer<float>(_output));
  }
  else
  {
    nnfw::cker::SoftmaxParams op_params;
    op_params.beta = _beta;
    nnfw::cker::reference::Softmax(op_params, getShape(_input), getBuffer<float>(_input),
                                   getShape(_output), getBuffer<float>(_output));
  }
}

// Quantized (uint8/int8) softmax using the lookup tables prepared in configure().
template <typename T> void SoftMaxLayer::softmaxQuant8()
{
  nnfw::cker::SoftmaxParams op_params;
  op_params.scale = _output->data_scale();
  op_params.zero_point = _output->data_zero_point();
  op_params.uint8_table1 = _uint8_table1;
  op_params.uint8_table2 = _uint8_table2;
  op_params.table = _table;

#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
  nnfw::cker::SoftmaxInt8LUT<T, T>(op_params, getShape(_input), getBuffer<T>(_input),
                                   getShape(_output), getBuffer<T>(_output));
#else
  nnfw::cker::Softmax<T, T>(op_params, getShape(_input), getBuffer<T>(_input), getShape(_output),
                            getBuffer<T>(_output));
#endif
}

// Remember the operands and, for quantized inputs, pre-compute the exp lookup tables.
void SoftMaxLayer::configure(const IPortableTensor *input, const float beta,
                             IPortableTensor *output)
{
  _input = input;
  _output = output;
  _beta = beta;

  if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM ||
      _input->data_type() == OperandType::QUANT_INT8_ASYMM)
  {
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
    // Only applied when both input & output are uint8/int8 and the build uses clang
    // on aarch64.
    nnfw::cker::PopulateSoftmaxUInt8LookupTable(_uint8_table1, _uint8_table2, _input->data_scale(),
                                                _beta);
#else
    nnfw::cker::PopulateSoftmaxLookupTable(_table, _input->data_scale(), _beta);
#endif
  }
}

// Dispatch on the input data type.
void SoftMaxLayer::run()
{
  switch (_input->data_type())
  {
    case OperandType::FLOAT32:
      softmaxFloat32();
      break;
    case OperandType::QUANT_UINT8_ASYMM:
      softmaxQuant8<uint8_t>();
      break;
    case OperandType::QUANT_INT8_ASYMM:
      softmaxQuant8<int8_t>();
      break;
    default:
      throw std::runtime_error{"SoftMax: unsupported data type"};
  }
}

} // namespace onert::backend::cpu::ops
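
All of the float branches above compute the same row-wise quantity: for each batch row of input_size values, softmax over beta-scaled logits, i.e. out[i] = exp(beta * (in[i] - max)) / sum_j exp(beta * (in[j] - max)). The standalone sketch below illustrates that semantics for the 1-D/2-D case; the function name naive_softmax is ours for illustration and this is not the nnfw::cker implementation.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative only: row-wise softmax with a beta scale, matching the
// semantics of the float 1-D/2-D branches (not the cker kernel itself).
void naive_softmax(const float *in, uint32_t input_size, uint32_t batch_size, float beta,
                   float *out)
{
  for (uint32_t b = 0; b < batch_size; ++b)
  {
    const float *row_in = in + b * input_size;
    float *row_out = out + b * input_size;

    // Subtract the row maximum before exponentiating for numerical stability.
    float max_val = row_in[0];
    for (uint32_t i = 1; i < input_size; ++i)
      max_val = std::max(max_val, row_in[i]);

    float sum = 0.0f;
    for (uint32_t i = 0; i < input_size; ++i)
    {
      row_out[i] = std::exp(beta * (row_in[i] - max_val));
      sum += row_out[i];
    }
    for (uint32_t i = 0; i < input_size; ++i)
      row_out[i] /= sum;
  }
}

The quantized paths express, roughly speaking, the same computation on uint8/int8 data; configure() pre-builds exponential lookup tables from the input scale and beta so that run() avoids per-element calls to expf.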