#include "../KernelGenerator.h"
#include "../Validator.h"
void Validator::visit(const ir::operation::ElementwiseActivation &) { _supported = true; }
ops::ElementwiseActivationType
convertElementwiseActivationType(ir::operation::ElementwiseActivation::Type type_ir)
{
  switch (type_ir)
  {
    // ... one case per IR activation kind, elided in this listing ...
    default:
      throw std::runtime_error("cpu KernelGenerator : Not supported operation yet");
  }
}
// From the KernelGenerator visit for ElementwiseActivation (abridged):
auto output_tensor = _tensor_reg->getPortableTensor(output_index);
auto input_tensor = _tensor_reg->getPortableTensor(input_index);

auto fn = std::make_unique<ops::ElementwiseActivationLayer>();
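// A sketch of how the visit typically finishes, assuming the usual onert pattern
// (a node.param() carrying alpha/beta/approximate is assumed here, not shown in
// this listing):
//
//   fn->configure(input_tensor, output_tensor, node.param().alpha, node.param().beta,
//                 node.param().approximate,
//                 convertElementwiseActivationType(node.param().op_type));
//   _return_fn = std::move(fn);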
ElementwiseActivationLayer::ElementwiseActivationLayer()
  : _input(nullptr), _output(nullptr), _kernel()
{
  // DO NOTHING
}
void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type)
{
  // Quantization parameters come from the configured input/output tensors.
  const auto input_scale = _input->data_scale();
  const auto input_zero_point = _input->data_zero_point();
  const auto output_scale = _output->data_scale();
  const auto output_zero_point = _output->data_zero_point();

  const float inverse_scale = 1 / output_scale;
  int32_t maxval = std::numeric_limits<uint8_t>::max();
  int32_t minval = std::numeric_limits<uint8_t>::min();
  for (int32_t val = minval; val <= maxval; ++val)
  {
    const float dequantized = input_scale * (val - input_zero_point);
    float transformed = 0.f;
    if (op_type == ElementwiseActivationType::kTanh)
    {
      transformed = std::tanh(dequantized);
    }
    else if (op_type == ElementwiseActivationType::kLogistic)
    {
      transformed = 1.0f / (1.0f + std::exp(-dequantized));
    }
    else
    {
      throw std::runtime_error("ElementwiseActivationLayer : unsupported activation type");
    }
    const float rescaled = std::round(transformed * inverse_scale);
    const int32_t quantized = static_cast<int32_t>(rescaled + output_zero_point);
    _table[val] = static_cast<uint8_t>(std::max(std::min(maxval, quantized), minval));
  }
}
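// Worked example of one table entry (illustrative numbers, not from this listing):
// for kLogistic with input_scale = 0.05, input_zero_point = 128 and the common
// logistic output quantization output_scale = 1/256, output_zero_point = 0, the
// entry for val = 168 is:
//   dequantized = 0.05 * (168 - 128)      = 2.0
//   transformed = 1 / (1 + exp(-2.0))     ≈ 0.8808
//   rescaled    = round(0.8808 * 256)     = 225
//   _table[168] = clamp(225 + 0, 0, 255)  = 225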
void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input,
                                                      IPortableTensor *output)
{
  const int size = MatchingFlatSize(getShape(input), getShape(output));
  const uint8_t *input_data = getBuffer<uint8_t>(input);
  uint8_t *output_data = getBuffer<uint8_t>(output);

  for (int i = 0; i < size; ++i)
  {
    output_data[i] = _table[input_data[i]];
  }
}
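// Self-contained sketch of the same lookup-table technique outside the runtime;
// every name below is local to this example and nothing here is onert API:
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

// Build a 256-entry tanh table for uint8 tensors quantized as real = scale * (q - zp).
// Since a uint8 input has only 256 possible values, the table is exact.
inline void populate_tanh_table(float in_scale, int32_t in_zp, float out_scale,
                                int32_t out_zp, uint8_t table[256])
{
  for (int32_t q = 0; q <= 255; ++q)
  {
    const float x = in_scale * static_cast<float>(q - in_zp);
    const int32_t y = static_cast<int32_t>(std::round(std::tanh(x) / out_scale)) + out_zp;
    table[q] = static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, y)));
  }
}

// Evaluation is then one load and one store per element.
inline void apply_table(const uint8_t *in, uint8_t *out, size_t n, const uint8_t table[256])
{
  for (size_t i = 0; i < n; ++i)
    out[i] = table[in[i]];
}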
void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortableTensor *output,
                                           float alpha, float beta, bool approximate,
                                           const ElementwiseActivationType op_type)
{
  _input = input;
  _output = output;

  switch (op_type)
  {
    case ElementwiseActivationType::kElu:
      if (input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::ELU(getShape(input), getBuffer<float>(input), getShape(output),
                          getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Elu): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLogistic:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Logistic(getShape(input), getBuffer<float>(input), getShape(output),
                               getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU(getShape(input), getBuffer<float>(input), getShape(output),
                             getBuffer<float>(output));
          };
        }
        else if (alpha == 6.f && beta == 0.f)
        {
          _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
            nnfw::cker::ReLU6(getShape(input), getBuffer<float>(input), getShape(output),
                              getBuffer<float>(output));
          };
        }
        else
        {
          throw std::runtime_error(
            "ElementwiseActivationLayer : This layer supports only ReLU(0-inf) and ReLU6(0-6)");
        }
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kTanh:
      if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
      {
        PopulateLookupTable(op_type);
        _kernel = std::bind(&ElementwiseActivationLayer::EvalUsingLookupTable, this,
                            std::placeholders::_1, std::placeholders::_2);
      }
      else if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::Tanh(getShape(input), getBuffer<float>(input), getShape(output),
                           getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(Tanh): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kLeakyReLU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [alpha](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::LeakyReLU(nnfw::cker::LeakyReluParams{alpha}, getShape(input),
                                getBuffer<float>(input), getShape(output),
                                getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(LeakyReLU): unsupported data type"};
      }
      break;
    case ElementwiseActivationType::kGELU:
      if (_input->data_type() == OperandType::FLOAT32)
      {
        _kernel = [approximate](const IPortableTensor *input, IPortableTensor *output) {
          nnfw::cker::GELUParams params;
          params.approximate = approximate;
          nnfw::cker::GELU(params, getShape(input), getBuffer<float>(input),
                           getShape(output), getBuffer<float>(output));
        };
      }
      else
      {
        throw std::runtime_error{"ElementwiseActivationLayer(GELU): unsupported data type"};
      }
      break;
    default:
      throw std::runtime_error("ElementwiseActivationLayer: unsupported op type");
  }
}