ONE - On-device Neural Engine
BinaryArithmeticOps.h File Reference
#include <functional>
#include <limits>
#include <utility>
#include "cker/neon/neon_check.h"
#include "cker/operation/reference/BinaryArithmeticOps.h"
#include "cker/Shape.h"
#include "cker/Types.h"
#include "cker/Utils.h"
#include "fixedpoint/fixedpoint.h"


Data Structures

struct  nnfw::cker::optimized::BinaryOpFuncAddFloat
 
struct  nnfw::cker::optimized::BinaryOpFuncSubFloat
 
struct  nnfw::cker::optimized::BinaryOpFuncMulFloat
 
struct  nnfw::cker::optimized::BinaryOpFuncDivFloat
 
struct  nnfw::cker::optimized::BinaryOpFuncSwapArgs< BASEOPERATOR >
 
struct  nnfw::cker::optimized::BinaryOpActivationFloatNone
 
struct  nnfw::cker::optimized::BinaryOpActivationFloatMax
 
struct  nnfw::cker::optimized::BinaryOpActivationFloatMinMax
 

Namespaces

namespace  nnfw
 
namespace  nnfw::cker
 
namespace  nnfw::cker::optimized
 

Typedefs

using nnfw::cker::optimized::BinaryOpImplFloatFuncs = std::pair< void(*)(int, const BinaryArithmeticOpParam &, const float *, const float *, float *), void(*)(int, const BinaryArithmeticOpParam &, const float, const float *, float *)>
 

Functions

template<typename ElementwiseF , typename ScalarBroadcastF , typename T >
void nnfw::cker::optimized::BinaryBroadcastFiveFold (const BinaryArithmeticOpParam &params, bool switch_inputs, const Shape &, const T *unswitched_input1_data, const Shape &, const T *unswitched_input2_data, const Shape &, T *output_data, ElementwiseF elementwise_f, ScalarBroadcastF scalar_broadcast_f)
 
template<typename ElementwiseF , typename ScalarBroadcastF , typename T >
void nnfw::cker::optimized::BinaryBroadcastFiveFold (const BinaryArithmeticOpParam &unswitched_params, const Shape &, const T *unswitched_input1_data, const Shape &, const T *unswitched_input2_data, const Shape &, T *output_data, ElementwiseF elementwise_f, ScalarBroadcastF scalar_broadcast_f)
 
template<typename T >
std::enable_if_t< is_quant8< T >::value, int32_t > nnfw::cker::optimized::quant8_sum (const BinaryArithmeticOpParam &params, const T input1_data, const T input2_data)
 
void nnfw::cker::optimized::AddElementwise (int size, const BinaryArithmeticOpParam &params, const uint8_t *input1_data, const uint8_t *input2_data, uint8_t *output_data)
 
void nnfw::cker::optimized::AddElementwise (int size, const BinaryArithmeticOpParam &params, const int8_t *input1_data, const int8_t *input2_data, int8_t *output_data)
 
template<class OPERATOR , class ACTIVATION >
void nnfw::cker::optimized::BinaryOpElementwise (int size, const BinaryArithmeticOpParam &params, const float *input1_data, const float *input2_data, float *output_data)
 
template<class OPERATOR , class ACTIVATION >
void nnfw::cker::optimized::BinaryOpScalarBroadcast (int size, const BinaryArithmeticOpParam &params, const float broadcast_value, const float *input2_data, float *output_data)
 
template<class FUNC >
BinaryOpImplFloatFuncs nnfw::cker::optimized::getBinaryOpWithActivationImplFloat (const BinaryArithmeticOpParam &params)
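
The BinaryOpImplFloatFuncs typedef above pairs an elementwise kernel with a scalar-broadcast kernel, and getBinaryOpWithActivationImplFloat selects both for a given operator functor (e.g. BinaryOpFuncAddFloat). A minimal usage sketch follows; the include path and the float_activation_min / float_activation_max field names are assumptions based on the wider cker API, not part of this listing:

#include <limits>
#include "cker/operation/optimized/BinaryArithmeticOps.h" // assumed include path

using namespace nnfw::cker;
using namespace nnfw::cker::optimized;

// Add two float buffers of equal length with a fused ReLU, using the
// function-pointer pair returned by getBinaryOpWithActivationImplFloat.
void add_relu(int size, const float *in1, const float *in2, float *out)
{
  BinaryArithmeticOpParam params{};
  params.float_activation_min = 0.0f;                              // assumed field: ReLU lower bound
  params.float_activation_max = std::numeric_limits<float>::max(); // assumed field: no upper bound

  // Per the BinaryOpImplFloatFuncs typedef, .first is the elementwise kernel
  // and .second is the scalar-broadcast kernel.
  BinaryOpImplFloatFuncs fns = getBinaryOpWithActivationImplFloat<BinaryOpFuncAddFloat>(params);

  fns.first(size, params, in1, in2, out);      // elementwise: out[i] = act(in1[i] + in2[i])
  // fns.second(size, params, 2.0f, in2, out); // scalar broadcast: out[i] = act(2.0f + in2[i])
}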
 
template<typename T >
std::enable_if_t< is_quant8< T >::value > nnfw::cker::optimized::Add (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const T *input1_data, const Shape &input2_shape, const T *input2_data, const Shape &output_shape, T *output_data)
 
void nnfw::cker::optimized::Add (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const float *input1_data, const Shape &input2_shape, const float *input2_data, const Shape &output_shape, float *output_data)
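
In the float overload of Add above, each operand and the output are described by a Shape plus a raw buffer. A small sketch of the same-shape path, assuming an initializer-list Shape constructor and the float activation-bound fields in BinaryArithmeticOpParam (neither confirmed by this listing):

#include <limits>
#include <vector>
#include "cker/operation/optimized/BinaryArithmeticOps.h" // assumed include path

int main()
{
  using namespace nnfw::cker;

  BinaryArithmeticOpParam params{};
  // Assumed fields: disable the fused activation by using the full float range.
  params.float_activation_min = std::numeric_limits<float>::lowest();
  params.float_activation_max = std::numeric_limits<float>::max();

  const Shape shape{1, 2, 2, 1}; // assumed initializer-list Shape constructor
  std::vector<float> a{1.f, 2.f, 3.f, 4.f};
  std::vector<float> b{10.f, 20.f, 30.f, 40.f};
  std::vector<float> out(4);

  // Same-shape float Add; inputs that need broadcasting go through
  // BroadcastAddDispatch instead.
  optimized::Add(params, shape, a.data(), shape, b.data(), shape, out.data());
  return 0;
}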
 
void nnfw::cker::optimized::AddScalarBroadcast (int size, const BinaryArithmeticOpParam &params, uint8_t broadcast_value, const uint8_t *input2_data, uint8_t *output_data)
 
void nnfw::cker::optimized::AddScalarBroadcast (int size, const BinaryArithmeticOpParam &params, int8_t input1_data, const int8_t *input2_data, int8_t *output_data)
 
template<typename T >
std::enable_if_t< is_quant8< T >::value > nnfw::cker::optimized::BroadcastAddDispatch (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const T *input1_data, const Shape &input2_shape, const T *input2_data, const Shape &output_shape, T *output_data)
 
void nnfw::cker::optimized::BroadcastAddDispatch (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const float *input1_data, const Shape &input2_shape, const float *input2_data, const Shape &output_shape, float *output_data)
 
void nnfw::cker::optimized::Sub (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const float *input1_data, const Shape &input2_shape, const float *input2_data, const Shape &output_shape, float *output_data)
 
void nnfw::cker::optimized::BroadcastSubDispatch (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const float *input1_data, const Shape &input2_shape, const float *input2_data, const Shape &output_shape, float *output_data)
 
template<typename T >
std::enable_if_t< is_quant8< T >::value, int32_t > nnfw::cker::optimized::quant8_mul (const BinaryArithmeticOpParam &params, const T input1_data, const T input2_data)
 
void nnfw::cker::optimized::MulElementwise (int size, const BinaryArithmeticOpParam &params, const uint8_t *input1_data, const uint8_t *input2_data, uint8_t *output_data)
 
void nnfw::cker::optimized::MulElementwise (int size, const BinaryArithmeticOpParam &params, const int8_t *input1_data, const int8_t *input2_data, int8_t *output_data)
 
template<typename T >
std::enable_if_t< is_quant8< T >::value > nnfw::cker::optimized::Mul (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const T *input1_data, const Shape &input2_shape, const T *input2_data, const Shape &output_shape, T *output_data)
 
void nnfw::cker::optimized::Mul (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const float *input1_data, const Shape &input2_shape, const float *input2_data, const Shape &output_shape, float *output_data)
 
void nnfw::cker::optimized::MulSimpleBroadcast (int size, const BinaryArithmeticOpParam &params, const uint8_t broadcast_value, const uint8_t *input2_data, uint8_t *output_data)
 
void nnfw::cker::optimized::MulSimpleBroadcast (int size, const BinaryArithmeticOpParam &params, const int8_t broadcast_value, const int8_t *input2_data, int8_t *output_data)
 
template<typename T >
std::enable_if_t< is_quant8< T >::value > nnfw::cker::optimized::BroadcastMulDispatch (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const T *input1_data, const Shape &input2_shape, const T *input2_data, const Shape &output_shape, T *output_data)
 
void nnfw::cker::optimized::BroadcastMulDispatch (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const float *input1_data, const Shape &input2_shape, const float *input2_data, const Shape &output_shape, float *output_data)
 
void nnfw::cker::optimized::Div (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const float *input1_data, const Shape &input2_shape, const float *input2_data, const Shape &output_shape, float *output_data)
 
void nnfw::cker::optimized::BroadcastDivDispatch (const BinaryArithmeticOpParam &params, const Shape &input1_shape, const float *input1_data, const Shape &input2_shape, const float *input2_data, const Shape &output_shape, float *output_data)
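
Sub, Mul and Div mirror the Add entry points: an elementwise overload for equal shapes and a Broadcast*Dispatch overload when shapes differ. Below is a hedged sketch of the selection a caller might perform for Div; it assumes Shape provides operator== (as in the TFLite-style RuntimeShape it mirrors), and note that the broadcast path normally also expects broadcast metadata in params to have been populated by the caller, which this listing does not cover:

#include "cker/operation/optimized/BinaryArithmeticOps.h" // assumed include path

namespace
{

using namespace nnfw::cker;

// Hypothetical helper: choose between the elementwise Div and the broadcast
// dispatch based on whether the two shapes match (assumed Shape::operator==).
// params is assumed to be fully populated by the caller: activation bounds,
// plus broadcast-related fields for the broadcast case.
void div_dispatch(const BinaryArithmeticOpParam &params, const Shape &lhs_shape, const float *lhs,
                  const Shape &rhs_shape, const float *rhs, const Shape &out_shape, float *out)
{
  if (lhs_shape == rhs_shape)
    optimized::Div(params, lhs_shape, lhs, rhs_shape, rhs, out_shape, out);
  else
    optimized::BroadcastDivDispatch(params, lhs_shape, lhs, rhs_shape, rhs, out_shape, out);
}

} // namespace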