#include "kernels/PRelu.h"

#include "kernels/BinaryOpCommon.h"
#include "kernels/Utils.h"

#include <tensorflow/lite/kernels/internal/reference/binary_function.h>
#include <tensorflow/lite/kernels/internal/reference/prelu.h>

#include <stdexcept>
void PRelu::configure()
{
  // ... element-type and quantization checks ...
  if (input()->element_type() == DataType::U8)
  {
    _alpha_multipliers.resize(1);
    double alpha_multiplier = input()->scale() * alpha()->scale() / output()->scale();
    quantizeMultiplier(alpha_multiplier, &_alpha_multipliers[0].multiplier,
                       &_alpha_multipliers[0].shift);
  }
  else if (input()->element_type() == DataType::S16)
  {
    // Alpha is channel-wise quantized: one scale per channel of the last dimension.
    LUCI_INTERPRETER_CHECK(static_cast<int32_t>(alpha()->scales().size()) ==
                           alpha()->shape().dim(alpha()->quantized_dimension()));
    LUCI_INTERPRETER_CHECK(alpha()->shape().num_elements() ==
                           input()->shape().dim(input()->shape().num_dims() - 1));
  }
  output()->resize(calculateShapeForBroadcast(input()->shape(), alpha()->shape()));
}
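Both branches of configure() reduce real-valued rescale factors (for example input_scale * alpha_scale / output_scale, and the input_scale / output_scale identity factor behind _output_multiplier_identity / _output_shift_identity) to integer form via quantizeMultiplier from kernels/Utils.h. That helper is not shown here; a minimal sketch of the standard TFLite-style decomposition it performs, under the assumption that the factor is split into a Q0.31 multiplier and a power-of-two shift, looks roughly like this (quantizeMultiplierSketch is an illustrative name, not the project's function):

#include <cmath>
#include <cstdint>

// Sketch only: decompose `real_multiplier` into `multiplier` (Q0.31) and `shift`
// so that real_multiplier ~= multiplier * 2^(shift - 31).
void quantizeMultiplierSketch(double real_multiplier, int32_t *multiplier, int *shift)
{
  if (real_multiplier == 0.0)
  {
    *multiplier = 0;
    *shift = 0;
    return;
  }
  const double q = std::frexp(real_multiplier, shift); // mantissa in [0.5, 1)
  int64_t q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) // rounding reached 1.0: renormalize
  {
    q_fixed /= 2;
    ++(*shift);
  }
  *multiplier = static_cast<int32_t>(q_fixed);
}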
void PRelu::execute() const
{
  switch (input()->element_type())
  {
    case DataType::FLOAT32: evalFloat(); break;
    // ... DataType::U8 and DataType::S16 dispatch to evalQuantized() / evalQuantizedS16() ...
    default: throw std::runtime_error("luci-intp PRelu Unsupported type.");
  }
}
void PRelu::evalFloat() const
{
  // Broadcast path (input and alpha shapes differ); PReluFunc is the scalar
  // lambda (x >= 0.0f ? x : x * alpha) defined earlier in this function.
  tflite::reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>(
    getTensorShape(input()), getTensorData<float>(input()), getTensorShape(alpha()),
    getTensorData<float>(alpha()), getTensorShape(output()), getTensorData<float>(output()),
    PReluFunc);
  // Same-shape path: per element, if (input_data[i] >= 0) copy it, else multiply by alpha.
}
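The float path therefore reduces to the textbook PReLU definition; a tiny self-contained sketch of the same-shape case (illustrative names, not the kernel's code):

#include <cstddef>

// out[i] = in[i] when in[i] >= 0, otherwise in[i] * alpha[i].
void preluFloatSketch(const float *in, const float *alpha, float *out, std::size_t size)
{
  for (std::size_t i = 0; i < size; ++i)
    out[i] = (in[i] >= 0.0f) ? in[i] : in[i] * alpha[i];
}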
void PRelu::evalQuantized() const
{
  tflite::PreluParams op_params{};
  // ... zero-point offsets for input, alpha and output ...
  op_params.output_shift_1 = _output_shift_identity;       // identity (x >= 0) path
  op_params.output_multiplier_1 = _output_multiplier_identity;
  op_params.output_shift_2 = _alpha_multipliers[0].shift;  // alpha (x < 0) path
  op_params.output_multiplier_2 = _alpha_multipliers[0].multiplier;

  if (input()->shape() != alpha()->shape())
    tflite::reference_ops::BroadcastPrelu4DSlow(
      op_params, getTensorShape(input()), getTensorData<uint8_t>(input()), getTensorShape(alpha()),
      getTensorData<uint8_t>(alpha()), getTensorShape(output()), getTensorData<uint8_t>(output()));
  else
    tflite::reference_ops::Prelu<uint8_t>(op_params, /* same shape/data arguments */);
}
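For orientation, the two multiplier/shift pairs above are consumed roughly as follows by the TFLite reference kernel: the input is shifted to a zero-point-free value, the identity pair rescales it when it is non-negative, the alpha pair rescales the product with alpha otherwise, and the result is shifted back and clamped. A hedged per-element sketch (assuming the TFLite headers included above; the exact reference code may order the steps slightly differently):

#include <algorithm>
#include <cstdint>

// Illustrative uint8 PReLU element, using the PreluParams fields set in evalQuantized().
int32_t preluElemU8Sketch(uint8_t x, uint8_t a, const tflite::PreluParams &p)
{
  const int32_t x_centered = p.input_offset + x; // input_offset holds -zero_point
  int32_t y;
  if (x_centered >= 0)
    y = tflite::MultiplyByQuantizedMultiplier(x_centered, p.output_multiplier_1, p.output_shift_1);
  else
    y = tflite::MultiplyByQuantizedMultiplier(x_centered * (p.alpha_offset + a),
                                              p.output_multiplier_2, p.output_shift_2);
  y += p.output_offset;
  return std::min(255, std::max(0, y)); // clamp back to the uint8 range
}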
// Scalar helper for the S16 path: rescale and clamp a single element.
static inline int16_t evalElemS16PRelu(int16_t input_val, int16_t alpha_val,
                                       const ChannelQuantMultipliers &identity_mult,
                                       const ChannelQuantMultipliers &alpha_mult)
{
  constexpr int32_t quantized_min = std::numeric_limits<int16_t>::min();
  constexpr int32_t quantized_max = std::numeric_limits<int16_t>::max();

  const int32_t output_val =
    input_val >= 0
      ? tflite::MultiplyByQuantizedMultiplier(static_cast<int32_t>(input_val),
                                              identity_mult.multiplier, identity_mult.shift)
      : tflite::MultiplyByQuantizedMultiplier(static_cast<int32_t>(input_val) * alpha_val,
                                              alpha_mult.multiplier, alpha_mult.shift);
  return static_cast<int16_t>(std::min(quantized_max, std::max(quantized_min, output_val)));
}
void PRelu::evalQuantizedS16() const
{
  // Alpha is channel-wise quantized over the last dimension (validated in configure()).
  const ChannelQuantMultipliers pos_mult{_output_shift_identity, _output_multiplier_identity};
  // ... walk every element, applying pos_mult or the matching _alpha_multipliers entry ...
}
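To make the elided loop concrete, here is a sketch of how the per-channel multipliers would be applied, assuming input_data, alpha_data and output_data are the flat int16 buffers, outer_dims_size is the product of all but the last dimension, and quant_dim_size is the last (quantized) dimension; the names mirror the excerpt above but are illustrative rather than the exact implementation:

for (int32_t outer = 0; outer < outer_dims_size; ++outer)
{
  for (int32_t channel = 0; channel < quant_dim_size; ++channel)
  {
    const std::size_t offset = static_cast<std::size_t>(outer) * quant_dim_size + channel;
    // Identity multiplier for x >= 0, the channel's alpha multiplier for x < 0,
    // both handled inside the scalar helper shown earlier.
    output_data[offset] = evalElemS16PRelu(input_data[offset], alpha_data[offset], pos_mult,
                                           _alpha_multipliers[channel]);
  }
}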
Referenced declarations (PRelu kernel interface, Tensor accessors, and helpers from kernels/Utils.h and the TFLite reference headers):

PRelu(const Tensor *input, const Tensor *alpha, Tensor *output)
void configure() override
void execute() const override
const Tensor *input() const
const Tensor *alpha() const

const Shape &shape() const
void resize(const Shape &new_shape)
int32_t zero_point() const
const std::vector<int32_t> &zero_points() const

#define LUCI_INTERPRETER_CHECK(cond)
Shape calculateShapeForBroadcast(const Shape &input1_shape, const Shape &input2_shape)
void quantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift)
std::vector<ChannelQuantMultipliers> quantizeMultipliers(const std::vector<double> &effective_scale)
std::vector<double> getQuantizedConvolutionMultiplers(float input_scale, const std::vector<float> &filter_scale, float output_scale)
tflite::RuntimeShape getTensorShape(const Tensor *tensor)
int32_t MultiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier, int shift)
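Taken together, the kernel follows the usual luci-interpreter lifecycle of construct, configure (validation plus output resizing) and execute (type dispatch). A minimal driver sketch, assuming the caller has already created and filled the three tensors (runPRelu is an illustrative helper, not part of the project):

#include "kernels/PRelu.h"

using namespace luci_interpreter;

void runPRelu(const Tensor *input, const Tensor *alpha, Tensor *output)
{
  kernels::PRelu kernel(input, alpha, output);
  kernel.configure(); // checks dtypes/quantization and resizes `output` for broadcasting
  kernel.execute();   // dispatches to evalFloat / evalQuantized / evalQuantizedS16
}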