| Return type | Function |
|---|---|
| uint32_t | onert::backend::cpu::ops::getNumberOfDimensions (const IPortableTensor *tensor) |
| uint32_t | onert::backend::cpu::ops::getNumberOfElements (const IPortableTensor *tensor) |
| uint32_t | onert::backend::cpu::ops::getSizeOfDimension (const IPortableTensor *tensor, uint32_t dimensionIdx) |
| void | onert::backend::cpu::ops::QuantizeMultiplier (double double_multiplier, int32_t *quantized_multiplier, int *shift) |
| void | onert::backend::cpu::ops::GetQuantizedConvolutionMultiplier (const IPortableTensor *input, const IPortableTensor *filter, const IPortableTensor *bias, const IPortableTensor *output, double *multiplier) |
| void | onert::backend::cpu::ops::GetQuantizedConvolutionMultipliersAndShifts (float input_scale, float output_scale, const float *filter_scales, size_t filter_scales_size, int num_channels, std::vector< int32_t > &per_channel_output_multiplier, std::vector< int > &per_channel_output_shift) |
| void | onert::backend::cpu::ops::QuantizeMultiplierGreaterThanOne (double double_multiplier, int32_t *quantized_multiplier, int *left_shift) |
| void | onert::backend::cpu::ops::CalculateActivationRangeQuantized (ir::Activation activation, const IPortableTensor *output, int32_t *act_min, int32_t *act_max) |
| bool | onert::backend::cpu::ops::HaveSameShapes (const IPortableTensor *input1, const IPortableTensor *input2) |
| int32_t | onert::backend::cpu::ops::CalculateInputRadius (int input_integer_bits, int input_left_shift) |
| uint32_t | onert::backend::cpu::ops::sizeOfData (OperandType type, const std::vector< int32_t > &dimensions) |
| nnfw::cker::PaddingType | onert::backend::cpu::ops::getPaddingType (ir::PaddingType ir_padding_type) |
| std::vector< int32_t > | onert::backend::cpu::ops::getReducerAxes (const IPortableTensor *axes) |
| nnfw::cker::RoPEMode | onert::backend::cpu::ops::getRoPEMode (ir::operation::RoPE::RoPEMode rope_mode) |
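
Several of the helpers above implement the usual TensorFlow Lite-style quantization arithmetic: the real multiplier for a quantized convolution is typically input_scale * filter_scale / output_scale, and a QuantizeMultiplier-style routine then decomposes that real value into a Q31 fixed-point multiplier plus a power-of-two shift. The sketch below is a minimal, standalone illustration of that decomposition under those assumptions; the function name and rounding details are hypothetical and are not the onert implementation.

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iostream>

// Hypothetical sketch: split a positive real multiplier into a Q31 fixed-point
// multiplier and a power-of-two shift, i.e. double_multiplier ~= (q_fixed / 2^31) * 2^shift.
void QuantizeMultiplierSketch(double double_multiplier, int32_t *quantized_multiplier, int *shift)
{
  if (double_multiplier == 0.0)
  {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
  // std::frexp returns q in [0.5, 1) with double_multiplier == q * 2^(*shift).
  const double q = std::frexp(double_multiplier, shift);
  auto q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  assert(q_fixed <= (1LL << 31));
  if (q_fixed == (1LL << 31))
  {
    // Rounding pushed q up to exactly 1.0; renormalize back into Q31 range.
    q_fixed /= 2;
    ++*shift;
  }
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}

int main()
{
  int32_t multiplier = 0;
  int shift = 0;
  QuantizeMultiplierSketch(0.75, &multiplier, &shift);
  // 0.75 == (multiplier / 2^31) * 2^shift
  std::cout << "multiplier=" << multiplier << " shift=" << shift << "\n";
  return 0;
}
```

At runtime the kernel would multiply the accumulated int32 result by the Q31 multiplier, apply the shift, and then clamp to the activation range produced by a helper such as CalculateActivationRangeQuantized.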