#include "kernels/Utils.h"

#include <algorithm>
#include <cassert>
#include <cmath>
#include <limits>
template <typename T>
void calculateActivationRange(Activation activation, T *activation_min, T *activation_max)
{
  switch (activation)
  {
    case Activation::NONE:
      *activation_min = std::numeric_limits<T>::lowest();
      *activation_max = std::numeric_limits<T>::max();
      break;
    case Activation::RELU:
      *activation_min = 0;
      *activation_max = std::numeric_limits<T>::max();
      break;
    case Activation::RELU_N1_TO_1:
      *activation_min = -1;
      *activation_max = 1;
      break;
    case Activation::RELU6:
      *activation_min = 0;
      *activation_max = 6;
      break;
    default:
      assert(false && "Unsupported activation.");
  }
}
void matrixScalarMultiplyAccumulate(const int8_t *matrix, int32_t scalar, int32_t n_row,
                                    int32_t n_col, int32_t *output)
{
  // For each row, accumulate scalar * (sum of the row's elements) into output[i].
  for (int i = 0; i < n_row; ++i)
  {
    int32_t row_sum = 0;
    for (int j = 0; j < n_col; ++j)
    {
      row_sum += *matrix++;
    }
    output[i] += row_sum * scalar;
  }
}
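// Worked example (illustrative, assuming the sketch above): matrix = {1, 2, 3, 4}
// viewed as 2x2, scalar = 2, output = {0, 0}. The row sums are 3 and 7, so output
// becomes {6, 14}.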
template void calculateActivationRange(Activation activation, float *activation_min,
                                       float *activation_max);
template void calculateActivationRange(Activation activation, int32_t *activation_min,
                                       int32_t *activation_max);
bool checkedLog2(const float x, int *log2_result)
{
  // Compute log2(x) via natural logarithms; succeed only if x is (nearly) an
  // exact power of two. The tolerance absorbs float rounding error.
  const float x_log2 = std::log(x) * (1.0f / std::log(2.0f));
  const float x_log2_rounded = std::round(x_log2);
  const float x_log2_fracpart = x_log2 - x_log2_rounded;

  *log2_result = static_cast<int>(x_log2_rounded);
  return std::abs(x_log2_fracpart) < 1e-5f;
}
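// Illustrative behavior: checkedLog2(8.0f, &r) sets r = 3 and returns true, while
// checkedLog2(10.0f, &r) returns false because log2(10) ~ 3.32 is not integral.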
int calculateInputRadius(int input_integer_bits, int input_left_shift, int total_signed_bits)
{
  const double max_input_rescaled = 1.0 * ((1 << input_integer_bits) - 1) *
                                    (1LL << (total_signed_bits - input_integer_bits)) /
                                    (1LL << input_left_shift);
  // Tighten the bound with floor so the returned radius never overshoots.
  return static_cast<int>(std::floor(max_input_rescaled));
}
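// Worked example (illustrative): with input_integer_bits = 4, input_left_shift = 20
// and total_signed_bits = 24, max_input_rescaled = 15 * 2^20 / 2^20 = 15, so the
// input radius is 15.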
static void calculateActivationRangeQuantizedImpl(Activation activation, int32_t qmin,
                                                  int32_t qmax, int32_t zero_point, float scale,
                                                  int32_t *activation_min, int32_t *activation_max)
{
  auto quantize = [scale, zero_point](float x) {
    return zero_point + static_cast<int32_t>(std::round(x / scale));
  };
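  // Illustrative arithmetic (not from the original source): with scale = 0.5f and
  // zero_point = 10, quantize(6.0f) evaluates to 10 + round(6.0 / 0.5) = 22.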
  switch (activation)
  {
    case Activation::NONE:
    case Activation::TANH:
      *activation_min = qmin;
      *activation_max = qmax;
      break;
    case Activation::RELU:
      *activation_min = std::max(qmin, quantize(0.0f));
      *activation_max = qmax;
      break;
    case Activation::RELU_N1_TO_1:
      *activation_min = std::max(qmin, quantize(-1.0f));
      *activation_max = std::min(qmax, quantize(1.0f));
      break;
    case Activation::RELU6:
      *activation_min = std::max(qmin, quantize(0.0f));
      *activation_max = std::min(qmax, quantize(6.0f));
      break;
    default:
      assert(false && "Unsupported activation.");
  }
}
static void calculateActivationRangeQuantizedImpl(Activation activation, int32_t qmin,
                                                  int32_t qmax, const circle::Tensor *output,
                                                  int32_t *activation_min, int32_t *activation_max)
{
  const float scale = Tensor::scale(output);
  const int32_t zero_point = Tensor::zero_point(output);

  // Forward to the scalar overload; note the argument order is (zero_point, scale).
  calculateActivationRangeQuantizedImpl(activation, qmin, qmax, zero_point, scale,
                                        activation_min, activation_max);
}
void calculateActivationRangeQuantized(Activation activation, int32_t output_zero_point,
                                       float output_scale, DataType data_type,
                                       int32_t *activation_min, int32_t *activation_max)
{
  int32_t qmin{};
  int32_t qmax{};
  switch (data_type)
  {
    case DataType::U8:
      qmin = 0;
      qmax = std::numeric_limits<uint8_t>::max();
      break;
    case DataType::S8:
      qmin = -std::numeric_limits<int8_t>::max();
      qmax = std::numeric_limits<int8_t>::max();
      break;
    case DataType::S16:
      // Signed int16 is assumed to use symmetric quantization, i.e. zero_point == 0.
      assert(output_zero_point == 0);
      qmin = -std::numeric_limits<int16_t>::max();
      qmax = std::numeric_limits<int16_t>::max();
      break;
    default:
      assert(false && "Unsupported type.");
  }

  calculateActivationRangeQuantizedImpl(activation, qmin, qmax, output_zero_point, output_scale,
                                        activation_min, activation_max);
}
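// Illustrative ranges (not from the original source): DataType::U8 gives
// [qmin, qmax] = [0, 255]; DataType::S8 gives [-127, 127]; DataType::S16 gives
// [-32767, 32767]. The signed ranges are symmetric: the most negative value
// (-128 / -32768) is excluded by construction.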
void calculateActivationRangeQuantized(Activation activation, const circle::Tensor *output,
                                       int32_t *activation_min, int32_t *activation_max)
{
  // Only per-tensor quantization is supported here: exactly one zero point.
  assert(Tensor::zero_points(output).size() == 1);
  const float scale = Tensor::scale(output);
  const int32_t zero_point = Tensor::zero_point(output);
  calculateActivationRangeQuantized(activation, zero_point, scale, Tensor::element_type(output),
                                    activation_min, activation_max);
}
void quantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift)
{
  if (double_multiplier == 0.0)
  {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }

  const double q = std::frexp(double_multiplier, shift);
  auto q_fixed = static_cast<int64_t>(std::round(q * (int64_t(1) << 31)));
  // frexp returns q in [0.5, 1); rounding can still land exactly on 2^31, which
  // does not fit in int32_t, so halve it and bump the shift instead.
  if (q_fixed == (int64_t(1) << 31))
  {
    q_fixed /= 2;
    ++*shift;
  }
  assert(q_fixed <= std::numeric_limits<int32_t>::max());
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}
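// Worked example (illustrative): quantizeMultiplier(0.75, &m, &s) gives s = 0 and
// m = round(0.75 * 2^31) = 1610612736, i.e. 0.75 is represented as the Q31
// fixed-point value m * 2^s.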
luci_interpreter::RuntimeShape calculateShapeForBroadcast(const circle::Tensor *input1,
                                                          const circle::Tensor *input2)
{
  const int num_input1_dims = Tensor::num_dims(input1);
  const int num_input2_dims = Tensor::num_dims(input2);
  const int num_out_dims = std::max(num_input1_dims, num_input2_dims);
  luci_interpreter::RuntimeShape output_shape(num_out_dims);

  // Standard right-aligned broadcasting: each pair of dimensions must match, or
  // one of them must be 1.
  for (int i = 0; i < num_out_dims; ++i)
  {
    const int32_t input1_dim =
      i < num_input1_dims ? Tensor::dim(input1, num_input1_dims - i - 1) : 1;
    const int32_t input2_dim =
      i < num_input2_dims ? Tensor::dim(input2, num_input2_dims - i - 1) : 1;

    LUCI_INTERPRETER_CHECK(input1_dim == input2_dim || input1_dim == 1 || input2_dim == 1);
    output_shape.setDim(num_out_dims - i - 1, std::max(input1_dim, input2_dim));
  }

  return output_shape;
}
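// Illustrative broadcast (not from the original source): shapes [2, 1, 5] and
// [7, 1] right-align to [2, 1, 5] and [1, 7, 1], producing output shape [2, 7, 5].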