ONE - On-device Neural Engine
activation_float_helpers.h File Reference
#include "helpers.h"

Go to the source code of this file.
Macros
#define MLA(a, b, c)   ((b) * (c) + (a))
#define hard_swish_op(DATA_TYPE, x, A_VAL, B_VAL)   (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))
#define logistic_op(DATA_TYPE, x, A_VAL, B_VAL)   ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))
#define tanh_op(DATA_TYPE, x, A_VAL, B_VAL)   ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))
#define relu_op(DATA_TYPE, x, A_VAL, B_VAL)   (max((DATA_TYPE)0.0, x))
#define brelu_op(DATA_TYPE, x, A_VAL, B_VAL)   (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))
#define lu_brelu_op(DATA_TYPE, x, A_VAL, B_VAL)   (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
#define lrelu_op(DATA_TYPE, x, A_VAL, B_VAL)   ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))
#define srelu_op(DATA_TYPE, x, A_VAL, B_VAL)   (log((DATA_TYPE)1.0 + exp(x)))
#define elu_op(DATA_TYPE, x, A_VAL, B_VAL)   (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, isgreaterequal(x, (DATA_TYPE)0.0)))
#define abs_op(DATA_TYPE, x, A_VAL, B_VAL)   (fabs(x))
#define square_op(DATA_TYPE, x, A_VAL, B_VAL)   (x * x)
#define sqrt_op(DATA_TYPE, x, A_VAL, B_VAL)   (sqrt(x))
#define linear_op(DATA_TYPE, x, A_VAL, B_VAL)   (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))
#define identity_op(DATA_TYPE, x, A_VAL, B_VAL)   (x)
#define ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL)   op##_op(DATA_TYPE, x, A_VAL, B_VAL)
#define ACTIVATION(op, DATA_TYPE, x, A_VAL, B_VAL)   ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL)
#define abs_op(DATA_TYPE, x, A_VAL, B_VAL)   (fabs(x))
Definition at line 80 of file activation_float_helpers.h.
#define ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL)   op##_op(DATA_TYPE, x, A_VAL, B_VAL)
Definition at line 94 of file activation_float_helpers.h.
#define ACTIVATION(op, DATA_TYPE, x, A_VAL, B_VAL)   ACT_OP(op, DATA_TYPE, x, A_VAL, B_VAL)
Definition at line 96 of file activation_float_helpers.h.
#define brelu_op(DATA_TYPE, x, A_VAL, B_VAL)   (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))
Definition at line 63 of file activation_float_helpers.h.
#define elu_op(DATA_TYPE, x, A_VAL, B_VAL)   (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, isgreaterequal(x, (DATA_TYPE)0.0)))
Definition at line 76 of file activation_float_helpers.h.
#define hard_swish_op(DATA_TYPE, x, A_VAL, B_VAL)   (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))
Definition at line 50 of file activation_float_helpers.h.
#define identity_op(DATA_TYPE, x, A_VAL, B_VAL)   (x)
Definition at line 92 of file activation_float_helpers.h.
#define linear_op(DATA_TYPE, x, A_VAL, B_VAL)   (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))
Definition at line 89 of file activation_float_helpers.h.
#define logistic_op(DATA_TYPE, x, A_VAL, B_VAL)   ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))
Definition at line 54 of file activation_float_helpers.h.
#define lrelu_op(DATA_TYPE, x, A_VAL, B_VAL)   ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))
Definition at line 69 of file activation_float_helpers.h.
#define lu_brelu_op(DATA_TYPE, x, A_VAL, B_VAL)   (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
Definition at line 66 of file activation_float_helpers.h.
#define MLA(a, b, c)   ((b) * (c) + (a))
Definition at line 46 of file activation_float_helpers.h.
#define relu_op(DATA_TYPE, x, A_VAL, B_VAL)   (max((DATA_TYPE)0.0, x))
Definition at line 60 of file activation_float_helpers.h.
#define sqrt_op(DATA_TYPE, x, A_VAL, B_VAL)   (sqrt(x))
Definition at line 86 of file activation_float_helpers.h.
#define square_op(DATA_TYPE, x, A_VAL, B_VAL)   (x * x)
Definition at line 83 of file activation_float_helpers.h.
#define srelu_op(DATA_TYPE, x, A_VAL, B_VAL)   (log((DATA_TYPE)1.0 + exp(x)))
Definition at line 73 of file activation_float_helpers.h.
#define tanh_op(DATA_TYPE, x, A_VAL, B_VAL)   ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))
Definition at line 57 of file activation_float_helpers.h.