ONE - On-device Neural Engine
PALArithmeticOpCommon.h File Reference
#include "PALUtils.h"
#include "ProcessBroadcastShapes.h"
#include "core/OMKernelData.h"

Go to the source code of this file.

Data Structures

struct  onert_micro::execute::pal::AddFn< T >
 
struct  onert_micro::execute::pal::SubFn< T >
 
struct  onert_micro::execute::pal::MulFn< T >
 
struct  onert_micro::execute::pal::DivFn< T >
 
struct  onert_micro::execute::pal::SquaredDifferenceFn< T >
 

Namespaces

namespace  onert_micro
 
namespace  onert_micro::execute
 
namespace  onert_micro::execute::pal
 

Functions

template<typename T , typename Fn >
OMStatus onert_micro::execute::pal::ArithmeticOp (const core::BinaryArithmeticBroadcastParams &params, const int flat_size, const T *input1_data, const T *input2_data, T *output_data)
 
template<typename T >
void onert_micro::execute::pal::ElementWise (const uint32_t size, const core::ArithmeticQuantParams &params, const T *input1_data, const T *input2_data, T *output_data, T(*binary_func)(T, T, const core::ArithmeticQuantParams &))
 
template<typename T , typename Fn >
void onert_micro::execute::pal::ArithmeticOpScalar (const core::BinaryArithmeticBroadcastParams &params, const int flat_size, const T *input_data, const T scalar_value, T *output_data)
 
template<typename T , typename Fn >
OMStatus onert_micro::execute::pal::BroadcastArithmeticOp4DSlow (const core::BinaryArithmeticBroadcastParams &params, const core::OMRuntimeShape &input1_shape, const T *input1_data, const core::OMRuntimeShape &input2_shape, const T *input2_data, const core::OMRuntimeShape &output_shape, T *output_data)
 
template<typename T >
void onert_micro::execute::pal::BroadcastInput1 (int size, const core::ArithmeticQuantParams &params, const T *input1_data, const T *input2_data, T *output_data, T(*binary_func)(T, T, const core::ArithmeticQuantParams &))
 
template<typename T >
void onert_micro::execute::pal::BroadcastInput2 (int size, const core::ArithmeticQuantParams &params, const T *input1_data, const T *input2_data, T *output_data, T(*binary_func)(T, T, const core::ArithmeticQuantParams &))
 
template<typename T >
void onert_micro::execute::pal::BroadcastRecursiveDimensions (const core::ArithmeticQuantParams &params, int dimension, size_t *input1_offset_p, size_t *input2_offset_p, size_t *output_offset, size_t *compressed_input1_stride, size_t *compressed_input2_stride, size_t *compressed_output_shape, const T *input1_data, const T *input2_data, T *output_data, T(*binary_func)(T, T, const core::ArithmeticQuantParams &))
 
template<typename T >
void onert_micro::execute::pal::BroadcastBinaryFunction6DSlow (const core::ArithmeticQuantParams &params, const core::OMRuntimeShape &input1_shape, const T *input1_data, const core::OMRuntimeShape &input2_shape, const T *input2_data, const core::OMRuntimeShape &output_shape, T *output_data, T(*binary_func)(T, T, const core::ArithmeticQuantParams &))