20#include "kernels/Utils.h"
22#include <tensorflow/lite/kernels/internal/reference/reduce.h>
23#include <tensorflow/lite/kernels/internal/optimized/reduce.h>
static void resolveAxes(const int32_t *axes_data, int num_axes, tflite::MeanParams *params)
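// Hedged sketch of what an axis-resolution helper like this typically does:
// copy the requested reduction axes into tflite::MeanParams, which carries an
// axis_count plus a fixed-size axis[4] array. The body and the *Sketch name
// are illustrative assumptions, not the verbatim implementation from this file.
static void resolveAxesSketch(const int32_t *axes_data, int num_axes, tflite::MeanParams *params)
{
  params->axis_count = num_axes;
  for (int i = 0; i < num_axes; ++i)
    params->axis[i] = static_cast<int16_t>(axes_data[i]);
  for (int i = num_axes; i < 4; ++i)
    params->axis[i] = 1; // pad the unused slots
}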
  // ...
  for (int j = 0; j < i; j++)
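// Hedged sketch of the duplicate-axis scan that a "j < i" loop like the one
// above usually belongs to: normalize each axis to a non-negative index and
// compare it with the axes already seen, so repeated axes are counted once.
// The function name and exact logic are assumptions for illustration.
static int countUniqueAxesSketch(const int32_t *axes_data, int num_axes, int input_num_dims)
{
  int count = num_axes;
  for (int i = 0; i < num_axes; ++i)
  {
    const int current = axes_data[i] >= 0 ? axes_data[i] : axes_data[i] + input_num_dims;
    for (int j = 0; j < i; j++)
    {
      const int previous = axes_data[j] >= 0 ? axes_data[j] : axes_data[j] + input_num_dims;
      if (current == previous)
      {
        --count; // duplicate axis: do not count it again
        break;
      }
    }
  }
  return count;
}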
// Fragments likely from Mean::configure() (declared in the symbol list below):
  if (input()->element_type() == DataType::S16)
  // ...
  tflite::MeanParams params{};
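// Hedged sketch of the validation such an S16 branch usually performs: int16
// tensors are expected to be symmetrically quantized, i.e. to have zero points
// of 0. LUCI_INTERPRETER_CHECK is taken from the declarations on this page;
// zero_point(), output() and the exact condition are assumptions.
if (input()->element_type() == DataType::S16)
{
  LUCI_INTERPRETER_CHECK(input()->zero_point() == 0 && output()->zero_point() == 0);
}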
  // ...
  switch (input()->element_type())
    case DataType::FLOAT32:
      // ...
      throw std::runtime_error("luci-intp Mean Unsupported type.");
void Mean::evalFloat() const
  // ...
  tflite::MeanParams params{};
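// Hedged sketch of the float path: when keep_dims is set and a 4D input is
// reduced over H and W, a kernel like this can pass the resolved MeanParams to
// the MeanParams overload of tflite::reference_ops::Mean. getTensorData<float>
// and the exact call pattern are assumptions; the real method also has to
// handle the general case using the temporary tensors.
tflite::MeanParams params{};
resolveAxes(getTensorData<int32_t>(axes()), axes()->shape().num_elements(), &params);
tflite::reference_ops::Mean(params, getTensorShape(input()), getTensorData<float>(input()),
                            getTensorShape(output()), getTensorData<float>(output()));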
void Mean::evalQuantized() const
  // ...
  tflite::MeanParams params{};
  // ...
  tflite::optimized_ops::QuantizedMeanOrSum<uint8_t, int>( // argument list elided in this excerpt
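// Hedged sketch of the arithmetic QuantizedMeanOrSum ultimately performs for a
// uint8 mean when input and output quantization differ: dequantize, average,
// requantize, clamp. This standalone helper only illustrates the math over a
// flat buffer; the name and simplified flow are assumptions, not the library
// routine. Needs <algorithm>, <cmath>, <cstdint>.
static uint8_t quantizedMeanSketch(const uint8_t *data, int count, float in_scale,
                                   int32_t in_zero_point, float out_scale,
                                   int32_t out_zero_point)
{
  float sum = 0.0f;
  for (int i = 0; i < count; ++i)
    sum += in_scale * (static_cast<int32_t>(data[i]) - in_zero_point); // dequantize
  const float mean = sum / static_cast<float>(count);
  const int32_t q =
    static_cast<int32_t>(std::lround(mean / out_scale)) + out_zero_point;       // requantize
  return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q))); // clamp to uint8
}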
void Mean::evalQuantizedS16() const
  // ...
  constexpr int32_t output_min = -std::numeric_limits<int16_t>::max();
  constexpr int32_t output_max = std::numeric_limits<int16_t>::max();
  // ...
  int32_t output_multiplier{};
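// Hedged sketch of how output_multiplier is commonly derived: the ratio of the
// input scale to the output scale is converted into a fixed-point multiplier
// and shift via the quantizeMultiplier helper declared in the symbol list
// below. The scale() accessors are assumptions.
const double real_multiplier =
  static_cast<double>(input()->scale()) / static_cast<double>(output()->scale());
int32_t output_multiplier{};
int output_shift{};
quantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);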
  // ...
  for (int32_t batch = 0; batch < batches; ++batch)
    for (int32_t c = 0; c < depth; ++c)
      // ...
      tflite::MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
  // ...
  throw std::runtime_error("Unsupported configuration.");
// Declarations referenced by this source file:
const std::vector< Tensor * > & getOutputTensors() const
const ReducerParams _params
const ReducerParams & params() const
int32_t num_elements() const
void resize(const Shape &new_shape)
const Shape & shape() const
Mean(const Tensor *input, const Tensor *axes, Tensor *output, Tensor *temp_index, Tensor *resolved_axes, Tensor *temp_sum, const ReducerParams &params)
void execute() const override
const Tensor * axes() const
const Tensor * input() const
void configure() override
#define LUCI_INTERPRETER_CHECK(cond)
const luci_interpreter::RuntimeShape output_shape
int32_t calcOffset(const Shape &shape, int32_t d0, int32_t d1, int32_t d2, int32_t d3)
tflite::RuntimeShape getTensorShape(const Tensor *tensor)
void quantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift)
T must_cast(loco::Node *node)