ONE - On-device Neural Engine
OperationUtils.h File Reference
#include <backend/IPortableTensor.h>
#include <ir/DataType.h>
#include <ir/Operand.h>
#include <ir/Padding.h>
#include <ir/operation/RoPE.h>
#include <util/CalculateActivationRange.h>
#include <cker/Shape.h>
#include <cker/Types.h>
#include <limits>
#include <vector>


Data Structures

union  onert::backend::cpu::ops::DataPtr
 
union  onert::backend::cpu::ops::ConstDataPtr
 

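DataPtr and ConstDataPtr are unions of typed pointers that let a kernel view a tensor's raw buffer as the element type selected at runtime. A representative sketch of the pattern follows; the member names shown are illustrative, and the authoritative definitions are in this header.

#include <cstdint>

// Illustrative stand-in for a typed-pointer union like DataPtr.
// A kernel assigns the buffer once through the untyped member, then
// reads it through whichever typed member matches the operand's DataType.
union DataPtrSketch
{
  uint8_t *u8;
  int32_t *i32;
  float *f;
  void *v; // untyped entry point, assigned from the tensor's buffer()
};
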
Namespaces

namespace  onert
 
namespace  onert::backend
 
namespace  onert::backend::cpu
 
namespace  onert::backend::cpu::ops
 

Typedefs

using OperandType = onert::ir::DataType
 

Functions

uint32_t onert::backend::cpu::ops::getNumberOfDimensions (const IPortableTensor *tensor)
 
uint32_t onert::backend::cpu::ops::getNumberOfElements (const IPortableTensor *tensor)
 
uint32_t onert::backend::cpu::ops::getSizeOfDimension (const IPortableTensor *tensor, uint32_t dimensionIdx)
 
nnfw::cker::Shape onert::backend::cpu::ops::getExtendedTensorShape (const IPortableTensor *tensor)
 
nnfw::cker::Shape onert::backend::cpu::ops::getShape (const IPortableTensor *tensor)
 
nnfw::cker::FusedActivationFunctionType onert::backend::cpu::ops::convertActivationType (const ir::Activation activation)
 
int32_t onert::backend::cpu::ops::getAxis (uint32_t rank, int32_t axis)
 
void onert::backend::cpu::ops::QuantizeMultiplier (double double_multiplier, int32_t *quantized_multiplier, int *shift)
 
void onert::backend::cpu::ops::GetQuantizedConvolutionMultiplier (const IPortableTensor *input, const IPortableTensor *filter, const IPortableTensor *bias, const IPortableTensor *output, double *multiplier)
 
void onert::backend::cpu::ops::QuantizeMultiplierGreaterThanOne (double double_multiplier, int32_t *quantized_multiplier, int *left_shift)
 
void onert::backend::cpu::ops::GetQuantizedConvolutionMultipliersAndShifts (float input_scale, float output_scale, const float *filter_scales, size_t filter_scales_size, int num_channels, std::vector< int32_t > &per_channel_output_multiplier, std::vector< int > &per_channel_output_shift)
 
void onert::backend::cpu::ops::CalculateActivationRangeQuantized (ir::Activation activation, const IPortableTensor *output, int32_t *act_min, int32_t *act_max)
 
bool onert::backend::cpu::ops::HaveSameShapes (const IPortableTensor *input1, const IPortableTensor *input2)
 
int32_t onert::backend::cpu::ops::CalculateInputRadius (int input_integer_bits, int input_left_shift)
 
uint32_t onert::backend::cpu::ops::sizeOfData (OperandType type, const std::vector< int32_t > &dimensions)
 
nnfw::cker::PaddingType onert::backend::cpu::ops::getPaddingType (ir::PaddingType ir_padding_type)
 
std::vector< int32_t > onert::backend::cpu::ops::getReducerAxes (const IPortableTensor *axes)
 
nnfw::cker::RoPEMode onert::backend::cpu::ops::getRoPEMode (ir::operation::RoPE::RoPEMode rope_mode)
 
template<typename T >
const T * onert::backend::cpu::ops::getBuffer (const IPortableTensor *tensor)
 
template<typename T >
T * onert::backend::cpu::ops::getBuffer (IPortableTensor *tensor)
 
template<>
const bool * onert::backend::cpu::ops::getBuffer (const IPortableTensor *tensor)
 
template<>
bool * onert::backend::cpu::ops::getBuffer (IPortableTensor *tensor)
 
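The shape and buffer helpers above are typically combined inside a CPU kernel: check shape compatibility, query the element count once, then iterate over typed views of the buffers. A minimal sketch of a hypothetical elementwise-add kernel follows; addFloat32 and its surrounding setup are illustrative and not part of this header.

#include "OperationUtils.h" // declares the helpers used below

#include <cassert>
#include <cstdint>

using namespace onert::backend;
using namespace onert::backend::cpu::ops;

void addFloat32(const IPortableTensor *lhs, const IPortableTensor *rhs,
                IPortableTensor *out)
{
  assert(HaveSameShapes(lhs, rhs)); // elementwise ops require matching shapes

  const float *lhs_data = getBuffer<float>(lhs); // typed view of the raw buffer
  const float *rhs_data = getBuffer<float>(rhs);
  float *out_data = getBuffer<float>(out);

  const uint32_t count = getNumberOfElements(lhs);
  for (uint32_t i = 0; i < count; ++i)
    out_data[i] = lhs_data[i] + rhs_data[i];
}
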

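QuantizeMultiplier follows the common TFLite-style fixed-point decomposition: a real multiplier is split into a 31-bit fraction and a power-of-two shift so that double_multiplier is approximately quantized_multiplier * 2^(shift - 31). The sketch below shows that standard decomposition as an assumption about the approach; the authoritative implementation lives in the corresponding source file.

#include <cmath>
#include <cstdint>

// Sketch of the usual decomposition: double_multiplier ~= q_fixed * 2^(shift - 31),
// with q_fixed a fixed-point fraction in [2^30, 2^31).
void quantizeMultiplierSketch(double double_multiplier,
                              int32_t *quantized_multiplier, int *shift)
{
  if (double_multiplier == 0.)
  {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
  const double q = std::frexp(double_multiplier, shift); // q in [0.5, 1)
  auto q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) // rounding carried into the next power of two
  {
    q_fixed /= 2;
    ++(*shift);
  }
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}
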
Typedef Documentation

◆ OperandType

Definition at line 33 of file OperationUtils.h.
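
Since OperandType is an alias of onert::ir::DataType, ir data-type values can be passed directly to helpers such as sizeOfData. A small usage sketch, assuming the enumerator onert::ir::DataType::FLOAT32:

#include <cstdint>
#include <vector>

// Byte size of a 1x224x224x3 FLOAT32 tensor:
// 1 * 224 * 224 * 3 * sizeof(float) = 602112 bytes.
std::vector<int32_t> dims{1, 224, 224, 3};
uint32_t bytes =
    onert::backend::cpu::ops::sizeOfData(onert::ir::DataType::FLOAT32, dims);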