#include "../KernelGenerator.h"
#include "../Validator.h"

#include <memory>    // std::make_unique, std::unique_ptr
#include <stdexcept> // std::runtime_error
#include <utility>   // std::move

// The validator simply marks Softmax as supported by this backend.
void Validator::visit(const ir::operation::Softmax &) { _supported = true; }
void KernelGenerator::visit(const ir::operation::Softmax &node)
{
  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(ir::operation::Softmax::Input::INPUT)};
  const auto beta = node.param().beta;

  auto output_tensor = _tensor_reg->getPortableTensor(output_index);
  auto input_tensor = _tensor_reg->getPortableTensor(input_index);

  // Build the kernel object and hand it back to the executor via _return_fn.
  auto fn = std::make_unique<ops::SoftMaxLayer>();
  fn->configure(input_tensor, beta, output_tensor);
  _return_fn = std::move(fn);
}
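// Example: what happens to _return_fn afterwards. This is a minimal sketch
// with hypothetical stand-in types (IFunctionStub, Runner), not onert's real
// executor: the generated kernel is a run-only function object that the
// executor stores in order and invokes once per inference.
#include <vector>

struct IFunctionStub // stand-in for exec::IFunction
{
  virtual ~IFunctionStub() = default;
  virtual void run() = 0;
};

struct Runner // hypothetical executor loop
{
  std::vector<std::unique_ptr<IFunctionStub>> sequence; // generated kernels, in order
  void execute()
  {
    for (auto &fn : sequence)
      fn->run(); // e.g. ops::SoftMaxLayer::run()
  }
};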
  // In SoftMaxLayer::softmaxFloat32(), the rank-2 path divides the element
  // count by the batch size, so an empty batch is rejected up front.
  if (batch_size == 0)
    throw std::runtime_error("batch_size should not be 0");
  // Both float paths (the rank-4 case and the generic fallback) scale the
  // logits by beta through cker's parameter struct.
  nnfw::cker::SoftmaxParams op_params;
  op_params.beta = _beta;

  // The quantized path, softmaxQuant8<T>(), additionally passes the exp
  // lookup table populated in configure().
  op_params.table = _table;
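// Example: the math these cker calls implement, written out for one row. A
// sketch only (softmaxRow is not a cker function): logits are scaled by beta
// and shifted by the row max so exp() cannot overflow.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> softmaxRow(const std::vector<float> &x, float beta)
{
  const float max_val = *std::max_element(x.begin(), x.end());
  std::vector<float> out(x.size());
  float sum = 0.0f;
  for (std::size_t i = 0; i < x.size(); ++i)
  {
    out[i] = std::exp((x[i] - max_val) * beta);
    sum += out[i];
  }
  for (float &v : out)
    v /= sum; // normalize: the row now sums to 1
  return out;
}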
  // SoftMaxLayer::configure(): for asymmetric-quantized inputs the exp values
  // are tabulated once here, so run() only performs table lookups.
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
  nnfw::cker::PopulateSoftmaxUInt8LookupTable(_uint8_table1, _uint8_table2,
                                              _input->data_scale(), _beta);
#else
  nnfw::cker::PopulateSoftmaxLookupTable(_table, _input->data_scale(), _beta);
#endif
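// Example: how such a lookup table is typically built. A sketch modeled on
// the TFLite-style LUT (populateTable here is hypothetical, not the cker
// function): one exp() per possible uint8 distance from the row maximum,
// indexed so that table[255 - (max - x)] replaces exp() at run time.
#include <cstdint>

void populateTable(float table[256], float input_scale, float beta)
{
  const float scale = -input_scale * beta; // dequantized step per uint8 tick
  for (int32_t diff = 0; diff <= 255; ++diff)
    table[255 - diff] = std::exp(scale * static_cast<float>(diff));
}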
  switch (_input->data_type()) // SoftMaxLayer::run(): dispatch on element type
  {
    case OperandType::FLOAT32:
      softmaxFloat32();
      break;
    case OperandType::QUANT_UINT8_ASYMM:
      softmaxQuant8<uint8_t>();
      break;
    case OperandType::QUANT_INT8_ASYMM:
      softmaxQuant8<int8_t>();
      break;
    default:
      throw std::runtime_error{"SoftMax: unsupported data type"};
  }
// Referenced declarations (from the surrounding headers):
//
// IPortableTensor — a tensor class that is portable across backends:
//   float data_scale() const override final;
//   int32_t data_zero_point() const override final;
//   ir::DataType data_type() const override final;
//
// KernelGenerator:
//   std::unique_ptr<exec::IFunction> _return_fn;
//
// ops::SoftMaxLayer:
//   void configure(const IPortableTensor *input, const float beta, IPortableTensor *output);
//   const IPortableTensor *_input;
//   IPortableTensor *_output;
//
// Shape helpers:
//   uint32_t getNumberOfElements(const Shape &shape);
//   uint32_t getSizeOfDimension(const Shape &shape, uint32_t dimensionIdx);
//   uint32_t getNumberOfDimensions(const Shape &shape);
//
// nnfw::cker:
//   void Softmax(const SoftmaxParams &params, const Shape &input_shape,
//                const float *input_data, const Shape &output_shape, float *output_data);
//   void Softmax(const float *in, const int input_size, const int batch_size,
//                const float beta, float *out);
//   void PopulateSoftmaxLookupTable(float *table, float input_scale, float beta);
//   nnfw::cker::Shape getShape(const IPortableTensor *tensor);
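// Example: exercising the flat float overload declared above on a 2x3 batch;
// each output row should sum to 1. A sketch only: the header path
// <cker/operation/SoftMax.h> is an assumption.
#include <cker/operation/SoftMax.h>
#include <cstdio>

int main()
{
  const float in[6] = {1.f, 2.f, 3.f, 1.f, 1.f, 1.f};
  float out[6] = {};
  nnfw::cker::Softmax(in, /*input_size=*/3, /*batch_size=*/2, /*beta=*/1.0f, out);
  for (float v : out)
    std::printf("%f\n", v); // row 1 ~ {0.090, 0.245, 0.665}; row 2 all 1/3
  return 0;
}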