#ifndef __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__
#define __NNFW_SUPPORT_NNAPI_OPERATION_UTILS_H__

#include <ir/DataType.h>

inline nnfw::cker::Shape getExtendedTensorShape(const IPortableTensor *tensor)
{
  const int32_t extended_rank = 4;
  int32_t raw_shape[extended_rank];
  auto shape = tensor->getShape();
  // Left-pad with 1s so every tensor is treated as rank-4
  uint32_t src = extended_rank - shape.rank();
  for (uint32_t i = 0; i < extended_rank; ++i)
    raw_shape[i] = (i < src) ? 1 : shape.dim(i - src);
  return nnfw::cker::Shape(extended_rank, raw_shape);
}
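
// Usage sketch (illustrative shapes): a rank-2 tensor of shape {3, 4} comes back
// as the rank-4 cker shape {1, 1, 3, 4}, so rank-agnostic kernels can assume 4D input.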

inline nnfw::cker::Shape getShape(const IPortableTensor *tensor)
{
  if (tensor == nullptr)
    return nnfw::cker::Shape();

  const ir::Shape &shape = tensor->get_info().shape();
  auto rank = shape.rank();
  nnfw::cker::Shape ret(rank);
  auto data = ret.DimsData();
  for (int i = 0; i < rank; ++i)
    data[i] = shape.dim(i);
  return ret;
}

// Unhandled activation types in convertActivationType fall through to:
throw std::runtime_error{"CPU backend: Cannot convert activation type"};

inline int32_t getAxis(uint32_t rank, int32_t axis)
{
  // Normalize a negative axis index to its positive equivalent
  return (axis < 0) ? axis + static_cast<int32_t>(rank) : axis;
}
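
// Usage sketch (illustrative): with rank 4, getAxis(4, -1) == 3 and getAxis(4, 2) == 2.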

void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
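
// Defined out of line (presumably in the matching .cc). A minimal sketch of the
// usual TFLite-style decomposition, which may differ from the real definition in
// edge-case handling: the double multiplier is split into a Q31 fixed-point value
// and a power-of-two shift.
//
//   #include <cmath>
//   void QuantizeMultiplierSketch(double m, int32_t *quantized, int *shift) // name is illustrative
//   {
//     if (m == 0.) { *quantized = 0; *shift = 0; return; }
//     const double q = std::frexp(m, shift);  // m == q * 2^(*shift), |q| in [0.5, 1)
//     auto q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
//     if (q_fixed == (1LL << 31)) { q_fixed /= 2; ++(*shift); }  // rounding can hit 2^31 exactly
//     *quantized = static_cast<int32_t>(q_fixed);
//   }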

void GetQuantizedConvolutionMultipliersAndShifts(
  float input_scale, float output_scale, const float *filter_scales, size_t filter_scales_size,
  int num_channels, std::vector<int32_t> &per_channel_output_multiplier,
  std::vector<int> &per_channel_output_shift);
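
// A hedged sketch of the per-channel semantics (the loop body is an assumption,
// not the verbatim implementation): each channel's real multiplier is
// input_scale * filter_scale / output_scale, quantized via QuantizeMultiplier.
//
//   // assumes both output vectors were resized to num_channels beforehand
//   for (int c = 0; c < num_channels; ++c)
//   {
//     const float filter_scale = (filter_scales_size == 1) ? filter_scales[0] : filter_scales[c];
//     const double real_multiplier = static_cast<double>(input_scale) * filter_scale / output_scale;
//     QuantizeMultiplier(real_multiplier, &per_channel_output_multiplier[c],
//                        &per_channel_output_shift[c]);
//   }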

void CalculateActivationRangeQuantized(ir::Activation activation, const IPortableTensor *output,
                                       int32_t *act_min, int32_t *act_max);
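
// A hedged sketch of what this computes (exact bound handling lives in the
// definition): the float clamp range of the fused activation, mapped into the
// output tensor's quantized domain via its scale and zero point.
//
//   const float scale = output->data_scale();
//   const int32_t zero_point = output->data_zero_point();
//   auto quantize = [&](float f) { return zero_point + static_cast<int32_t>(std::round(f / scale)); };
//   // e.g. ir::Activation::RELU6 gives *act_min = quantize(0.0f) and *act_max = quantize(6.0f),
//   // both clamped to the numeric limits of the output's quantized element type.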

template <typename T> const T *getBuffer(const IPortableTensor *tensor)
{
  return reinterpret_cast<const T *>(tensor->buffer());
}

template <typename T> T *getBuffer(IPortableTensor *tensor)
{
  return reinterpret_cast<T *>(tensor->buffer());
}

template <> inline const bool *getBuffer(const IPortableTensor *tensor)
{
  static_assert(sizeof(bool) == 1, "cpu backend supports bool type which is 1 byte");
  return reinterpret_cast<const bool *>(tensor->buffer());
}

template <> inline bool *getBuffer(IPortableTensor *tensor)
{
  static_assert(sizeof(bool) == 1, "cpu backend supports bool type which is 1 byte");
  return reinterpret_cast<bool *>(tensor->buffer());
}
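
// Usage sketch (tensor names are illustrative): typed views over raw tensor memory.
//
//   const float *input_data = getBuffer<float>(input);  // const overload
//   bool *mask_data = getBuffer<bool>(mask);            // bool specialization, 1 byte per element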

IPortableTensor: a tensor class that is portable across backends.
uint32_t getNumberOfElements(const Shape &shape)
uint32_t getSizeOfDimension(const Shape &shape, uint32_t dimensionIdx)
uint32_t getNumberOfDimensions(const Shape &shape)
template <typename T> const T *getBuffer(const IPortableTensor *tensor)
int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift)
nnfw::cker::FusedActivationFunctionType convertActivationType(const ir::Activation activation)
nnfw::cker::RoPEMode getRoPEMode(ir::operation::RoPE::RoPEMode rope_mode)
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
void GetQuantizedConvolutionMultipliersAndShifts(float input_scale, float output_scale, const float *filter_scales, size_t filter_scales_size, int num_channels, std::vector<int32_t> &per_channel_output_multiplier, std::vector<int> &per_channel_output_shift)
int32_t getAxis(uint32_t rank, int32_t axis)
void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift)
uint32_t sizeOfData(OperandType type, const std::vector<int32_t> &dimensions)
void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier, int *left_shift)
std::vector<int32_t> getReducerAxes(const IPortableTensor *axes)
void CalculateActivationRangeQuantized(ir::Activation activation, const IPortableTensor *output, int32_t *act_min, int32_t *act_max)
nnfw::cker::Shape getExtendedTensorShape(const IPortableTensor *tensor)
void GetQuantizedConvolutionMultiplier(const IPortableTensor *input, const IPortableTensor *filter, const IPortableTensor *bias, const IPortableTensor *output, double *multiplier)
bool HaveSameShapes(const IPortableTensor *input1, const IPortableTensor *input2)