ONE - On-device Neural Engine
Loading...
Searching...
No Matches
activation_float_helpers.h File Reference
#include "helpers.h"

Go to the source code of this file.

Macros

#define MLA(a, b, c)   ((b) * (c) + (a))
 
#define hard_swish_op(DATA_TYPE, x, A_VAL, B_VAL)    (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))
 
#define logistic_op(DATA_TYPE, x, A_VAL, B_VAL)   ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))
 
#define tanh_op(DATA_TYPE, x, A_VAL, B_VAL)   ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))
 
#define relu_op(DATA_TYPE, x, A_VAL, B_VAL)   (max((DATA_TYPE)0.0, x))
 
#define brelu_op(DATA_TYPE, x, A_VAL, B_VAL)   (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))
 
#define lu_brelu_op(DATA_TYPE, x, A_VAL, B_VAL)   (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
 
#define lrelu_op(DATA_TYPE, x, A_VAL, B_VAL)    ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))
 
#define srelu_op(DATA_TYPE, x, A_VAL, B_VAL)   (log((DATA_TYPE)1.0 + exp(x)))
 
#define elu_op(DATA_TYPE, x, A_VAL, B_VAL)    (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, isgreaterequal(x, (DATA_TYPE)0.0)))
 
#define abs_op(DATA_TYPE, x, A_VAL, B_VAL)   (fabs(x))
 
#define square_op(DATA_TYPE, x, A_VAL, B_VAL)   (x * x)
 
#define sqrt_op(DATA_TYPE, x, A_VAL, B_VAL)   (sqrt(x))
 
#define linear_op(DATA_TYPE, x, A_VAL, B_VAL)   (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))
 
#define identity_op(DATA_TYPE, x, A_VAL, B_VAL)   (x)
 
#define ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL)   op##_op(DATA_TYPE, x, A_VAL, B_VAL)
 
#define ACTIVATION(op, DATA_TYPE, x, A_VAL, B_VAL)   ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL)
 

Macro Definition Documentation

◆ abs_op

#define abs_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    (fabs(x))

Definition at line 80 of file activation_float_helpers.h.

◆ ACT_OP

#define ACT_OP (   op,
  DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    op##_op(DATA_TYPE, x, A_VAL, B_VAL)

Definition at line 94 of file activation_float_helpers.h.

◆ ACTIVATION

#define ACTIVATION (   op,
  DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL)

Definition at line 96 of file activation_float_helpers.h.

◆ brelu_op

#define brelu_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))

Definition at line 63 of file activation_float_helpers.h.

◆ elu_op

#define elu_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)     (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, isgreaterequal(x, (DATA_TYPE)0.0)))

Definition at line 76 of file activation_float_helpers.h.

◆ hard_swish_op

#define hard_swish_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)     (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))

Definition at line 50 of file activation_float_helpers.h.

◆ identity_op

#define identity_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    (x)

Definition at line 92 of file activation_float_helpers.h.

◆ linear_op

#define linear_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))

Definition at line 89 of file activation_float_helpers.h.

◆ logistic_op

#define logistic_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))

Definition at line 54 of file activation_float_helpers.h.

◆ lrelu_op

#define lrelu_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)     ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))

Definition at line 69 of file activation_float_helpers.h.

◆ lu_brelu_op

#define lu_brelu_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))

Definition at line 66 of file activation_float_helpers.h.

◆ MLA

#define MLA (   a,
  b,
  c 
)    ((b) * (c) + (a))

Definition at line 46 of file activation_float_helpers.h.

◆ relu_op

#define relu_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    (max((DATA_TYPE)0.0, x))

Definition at line 60 of file activation_float_helpers.h.

◆ sqrt_op

#define sqrt_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    (sqrt(x))

Definition at line 86 of file activation_float_helpers.h.

◆ square_op

#define square_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    (x * x)

Definition at line 83 of file activation_float_helpers.h.

◆ srelu_op

#define srelu_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    (log((DATA_TYPE)1.0 + exp(x)))

Definition at line 73 of file activation_float_helpers.h.

◆ tanh_op

#define tanh_op (   DATA_TYPE,
  x,
  A_VAL,
  B_VAL 
)    ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))

Definition at line 57 of file activation_float_helpers.h.