ONE - On-device Neural Engine
nnfw::cker::train Namespace Reference

Namespaces
namespace | functor |
Data Structures
struct | LaunchConv2DBackpropFilterOp |
struct | LaunchConv2DBackpropInputOp |
struct | LaunchConv2DBackpropInputOpImpl |
Enumerations
enum class | ArithmeticType { kAdd , kSub , kMul , kDiv } |
enum class | LossReductionType { SUM_OVER_BATCH_SIZE , SUM } |
Functions
void | AveragePool2DGrad (const PoolParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data) |
template<typename T > | |
void | BinaryArithmeticGrad (const Shape &lhs_shape, const T *lhs_data, const Shape &rhs_shape, const T *rhs_data, const Shape &incoming_shape, const T *incoming_data, const Shape &lhs_grad_shape, T *lhs_grad_data, const Shape &rhs_grad_shape, T *rhs_grad_data, ArithmeticType arithmetic_type) |
void | ConvInputGrad (const ConvParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &filter_shape, const float *filter_data, const int padding_bottom, const int padding_right, const Shape &grad_shape, float *grad_data) |
void | ConvFilterGrad (const ConvParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &input_shape, const float *input_data, const int padding_bottom, const int padding_right, const Shape &filter_backprop_shape, float *filter_backprop_data) |
template<typename T > | |
void | backpropInput (const DepthwiseConvParams &params, const Shape &incoming_shape, const T *incoming_data, const Shape &filter_shape, const T *filter_data, T *padded_filter_data, const Shape &grad_shape, T *grad_data, bool pad_filter, T *filter_buffers_data, T *filter_dim_buffers_data) |
template<typename T > | |
void | backpropFilter (const DepthwiseConvParams &params, const Shape &incoming_shape, const T *incoming_data, const Shape &input_shape, const T *input_data, const Shape &filter_grad_shape, T *filter_grad_data, T *padded_filter_data, T *filter_buffers_data) |
template<typename T > | |
void | FullyConnectedBiasGrad (const Shape &incomming_shape, const T *incomming_data, const Shape &grad_shape, T *grad_data) |
template<typename T > | |
T | square (T value) |
template<typename T > | |
T | log_threshold () |
template<typename T > | |
void | MSE (const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &output_shape, T *output_data) |
template<typename T > | |
void | MSEGrad (const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &grad_shape, T *grad_data, LossReductionType reduction_type) |
template<typename T > | |
void | CategoricalCrossEntropy (const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &output_shape, T *output_data) |
template<typename T > | |
void | CategoricalCrossEntropyGrad (const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &grad_shape, T *grad_data, LossReductionType reduction_type) |
template<typename T > | |
void | CategoricalCrossEntropyWithLogits (const Shape &logits_shape, const T *logits_data, const Shape &y_true_shape, const T *y_true_data, const Shape &loss_out_shape, T *loss_out_data, const Shape &grad_shape, T *grad_data, LossReductionType reduction_type) |
void | MaxPool2D (const PoolParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data, int *arg_max_index) |
void | MaxPool2DGrad (const Shape &incoming_shape, const float *incoming_data, const int *arg_max_index, const Shape &grad_shape, float *grad_data) |
template<typename T > | |
void | Depad (const int32_t *padding_data, int32_t pad_rank, const Shape &input_shape, const T *input_data, const Shape &output_shape, T *output_data) |
template<typename T > | |
void | MeanGrad (const Shape &incoming_shape, const T *incoming_data, const Shape &grad_shape, T *grad_data) |
void | ReLUGrad (const Shape &output_shape, const float *output_data, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data) |
void | ReLU6Grad (const Shape &output_shape, const float *output_data, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data) |
void | SoftMaxGrad (const Shape &output_shape, const float *output_data, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data) |
void | Adam (const Shape &trainable_shape, float *trainable_data, const Shape &grad_shape, const float *grad_data, const Shape &m_shape, float *m_data, const Shape &v_shape, float *v_data, float beta1_power, float beta2_power, float learning_rate, float beta1, float beta2, float epsilon, bool use_nesterov) |
void | GradientDescent (const Shape &output_shape, float *output_data, const Shape &grad_shape, const float *grad_data, float learning_rate) |
Enumeration Type Documentation

enum class nnfw::cker::train::ArithmeticType  [strong]

enum class nnfw::cker::train::LossReductionType  [strong]

Function Documentation

void nnfw::cker::train::Adam (const Shape &trainable_shape, float *trainable_data, const Shape &grad_shape, const float *grad_data, const Shape &m_shape, float *m_data, const Shape &v_shape, float *v_data, float beta1_power, float beta2_power, float learning_rate, float beta1, float beta2, float epsilon, bool use_nesterov)  [inline]
Definition at line 33 of file Adam.h.
References nnfw::cker::Tensor::buffer, nnfw::cker::Shape::DimensionsCount(), nnfw::cker::Shape::DimsData(), nnfw::cker::Tensor::flat(), nnfw::cker::eigen_support::GetThreadPoolDevice(), nnfw::cker::Shape::ReplaceWith(), nnfw::cker::Tensor::scalar(), and nnfw::cker::Tensor::shape.
Referenced by onert::backend::train::optimizer::Adam::applyGradient().
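The following is a minimal usage sketch, not taken from the library itself: it assumes the include path cker/train/optimizer/Adam.h (the page only names "Adam.h"), assumes nnfw::cker::Shape has a (rank, dims-array) constructor like TFLite's RuntimeShape, and assumes beta1_power/beta2_power carry the usual Adam bias-correction terms beta1^t and beta2^t; the example function name is a placeholder.

#include <cker/train/optimizer/Adam.h> // assumed include path
#include <cmath>
#include <cstdint>

// Sketch: one Adam update step on a 4-element parameter tensor.
void adam_step_example()
{
  const int32_t dims[] = {4};
  nnfw::cker::Shape shape(1, dims); // assumed (rank, dims) constructor

  float param[4] = {0.1f, -0.2f, 0.3f, -0.4f};   // trainable weights, updated in place
  float grad[4] = {0.01f, 0.02f, -0.01f, 0.05f}; // gradients from the backward pass
  float m[4] = {0.f, 0.f, 0.f, 0.f};             // first-moment (mean) state
  float v[4] = {0.f, 0.f, 0.f, 0.f};             // second-moment (variance) state

  const float beta1 = 0.9f, beta2 = 0.999f, epsilon = 1e-7f, lr = 0.001f;
  const int step = 1;
  const float beta1_power = std::pow(beta1, step); // assumed to be beta1^t
  const float beta2_power = std::pow(beta2, step); // assumed to be beta2^t

  nnfw::cker::train::Adam(shape, param, shape, grad, shape, m, shape, v,
                          beta1_power, beta2_power, lr, beta1, beta2, epsilon,
                          /*use_nesterov=*/false);
}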
void nnfw::cker::train::AveragePool2DGrad (const PoolParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data)  [inline]
Definition at line 33 of file AveragePool.h.
References nnfw::cker::Shape::DimensionsCount(), nnfw::cker::Shape::Dims(), nnfw::cker::PoolParams::filter_height, nnfw::cker::PoolParams::filter_width, nnfw::cker::Shape::FlatSize(), nnfw::cker::PaddingValues::height, nnfw::cker::MapAsMatrixWithLastDimAsRows(), nnfw::cker::MatchingDim(), nnfw::cker::NodeOffset(), nnfw::cker::PoolParams::padding_values, nnfw::cker::PoolParams::stride_height, nnfw::cker::PoolParams::stride_width, and nnfw::cker::PaddingValues::width.
template<typename T>
void nnfw::cker::train::backpropFilter (const DepthwiseConvParams &params, const Shape &incoming_shape, const T *incoming_data, const Shape &input_shape, const T *input_data, const Shape &filter_grad_shape, T *filter_grad_data, T *padded_filter_data, T *filter_buffers_data)
Definition at line 65 of file DepthwiseConv.h.
References nnfw::cker::DepthwiseConvParams::depth_multiplier, nnfw::cker::DepthwiseConvParams::dilation_height_factor, nnfw::cker::DepthwiseConvParams::dilation_width_factor, nnfw::cker::Shape::Dims(), nnfw::cker::PaddingValues::height, nnfw::cker::MatchingDim(), nnfw::cker::DepthwiseConvParams::padding_values, nnfw::cker::DepthwiseConvParams::stride_height, nnfw::cker::DepthwiseConvParams::stride_width, and nnfw::cker::PaddingValues::width.
template<typename T>
void nnfw::cker::train::backpropInput (const DepthwiseConvParams &params, const Shape &incoming_shape, const T *incoming_data, const Shape &filter_shape, const T *filter_data, T *padded_filter_data, const Shape &grad_shape, T *grad_data, bool pad_filter, T *filter_buffers_data, T *filter_dim_buffers_data)
Definition at line 32 of file DepthwiseConv.h.
References nnfw::cker::DepthwiseConvParams::depth_multiplier, nnfw::cker::DepthwiseConvParams::dilation_height_factor, nnfw::cker::DepthwiseConvParams::dilation_width_factor, nnfw::cker::Shape::Dims(), nnfw::cker::PaddingValues::height, nnfw::cker::MatchingDim(), nnfw::cker::DepthwiseConvParams::padding_values, nnfw::cker::DepthwiseConvParams::stride_height, nnfw::cker::DepthwiseConvParams::stride_width, and nnfw::cker::PaddingValues::width.
template<typename T>
void nnfw::cker::train::BinaryArithmeticGrad (const Shape &lhs_shape, const T *lhs_data, const Shape &rhs_shape, const T *rhs_data, const Shape &incoming_shape, const T *incoming_data, const Shape &lhs_grad_shape, T *lhs_grad_data, const Shape &rhs_grad_shape, T *rhs_grad_data, ArithmeticType arithmetic_type)
Definition at line 39 of file BinaryArithmetic.h.
References nnfw::cker::BroadcastTo(), kAdd, kDiv, kMul, kSub, and nnfw::cker::MapAsVector().
Referenced by onert::backend::train::ops::BinaryArithmeticLayer::backward().
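As a rough illustration of how a backward layer might drive this kernel, here is a sketch (not from the library) that computes the gradients for an elementwise kAdd, where the incoming gradient simply flows through to both operands; the include path, the Shape constructor, and the example function name are assumptions.

#include <cker/train/operation/BinaryArithmetic.h> // assumed include path
#include <cstdint>

// Sketch: gradients of z = lhs + rhs with respect to lhs and rhs.
void add_backward_example()
{
  const int32_t dims[] = {1, 4};
  nnfw::cker::Shape shape(2, dims); // every tensor here shares the shape {1, 4}

  const float lhs[4] = {1.f, 2.f, 3.f, 4.f};
  const float rhs[4] = {0.5f, 0.5f, 0.5f, 0.5f};
  const float incoming[4] = {1.f, 1.f, 1.f, 1.f}; // dLoss/dz from the next layer

  float lhs_grad[4]; // receives dLoss/dlhs
  float rhs_grad[4]; // receives dLoss/drhs

  nnfw::cker::train::BinaryArithmeticGrad(shape, lhs, shape, rhs, shape, incoming,
                                          shape, lhs_grad, shape, rhs_grad,
                                          nnfw::cker::train::ArithmeticType::kAdd);
}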
template<typename T>
void nnfw::cker::train::CategoricalCrossEntropy (const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &output_shape, T *output_data)  [inline]
Definition at line 103 of file Loss.h.
References nnfw::cker::Shape::Dims(), nnfw::cker::MapAsMatrixWithLastDimAsRows(), nnfw::cker::MapAsVector(), and output_shape.
Referenced by onert::backend::train::ops::LossCategoricalCrossentropyLayer::forward().
template<typename T>
void nnfw::cker::train::CategoricalCrossEntropyGrad (const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &grad_shape, T *grad_data, LossReductionType reduction_type)  [inline]
Definition at line 124 of file Loss.h.
References nnfw::cker::Shape::Dims(), nnfw::cker::MapAsMatrixWithLastDimAsRows(), SUM, and SUM_OVER_BATCH_SIZE.
Referenced by onert::backend::train::ops::LossCategoricalCrossentropyLayer::backward().
template<typename T>
void nnfw::cker::train::CategoricalCrossEntropyWithLogits (const Shape &logits_shape, const T *logits_data, const Shape &y_true_shape, const T *y_true_data, const Shape &loss_out_shape, T *loss_out_data, const Shape &grad_shape, T *grad_data, LossReductionType reduction_type)
Definition at line 160 of file Loss.h.
References nnfw::cker::Tensor::buffer, nnfw::cker::Shape::DimensionsCount(), nnfw::cker::Shape::Dims(), nnfw::cker::Shape::DimsData(), nnfw::cker::BCast::FromShape(), nnfw::cker::eigen_support::GetThreadPoolDevice(), nnfw::cker::Tensor::matrix(), nnfw::cker::Shape::ReplaceWith(), nnfw::cker::Tensor::shape, SUM, SUM_OVER_BATCH_SIZE, nnfw::cker::Tensor::vec(), nnfw::cker::BCast::x_bcast(), nnfw::cker::BCast::x_reshape(), nnfw::cker::BCast::y_bcast(), and nnfw::cker::BCast::y_reshape().
Referenced by onert::backend::train::ops::LossCategoricalCrossentropyLayer::backward().
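This kernel fuses the softmax-cross-entropy loss with its gradient with respect to the logits, so a single call can replace a CategoricalCrossEntropy/CategoricalCrossEntropyGrad pair. A hedged sketch follows; the include path, the Shape constructor, the per-sample {batch} shape of loss_out (suggested by the Tensor::vec() reference above), and the example function name are assumptions.

#include <cker/train/operation/Loss.h> // assumed include path
#include <cstdint>

// Sketch: loss and dLoss/dlogits for a batch of 2 samples with 3 classes each.
void softmax_xent_example()
{
  const int32_t in_dims[] = {2, 3};
  const int32_t loss_dims[] = {2};
  nnfw::cker::Shape in_shape(2, in_dims);     // {batch, classes}
  nnfw::cker::Shape loss_shape(1, loss_dims); // one loss value per sample (assumed)

  const float logits[6] = {2.0f, 1.0f, 0.1f, 0.5f, 2.5f, 0.3f};
  const float y_true[6] = {1.f, 0.f, 0.f, 0.f, 1.f, 0.f}; // one-hot labels

  float loss_out[2]; // per-sample loss
  float grad[6];     // gradient with respect to the logits

  nnfw::cker::train::CategoricalCrossEntropyWithLogits(
      in_shape, logits, in_shape, y_true, loss_shape, loss_out, in_shape, grad,
      nnfw::cker::train::LossReductionType::SUM_OVER_BATCH_SIZE);
}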
void nnfw::cker::train::ConvFilterGrad (const ConvParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &input_shape, const float *input_data, const int padding_bottom, const int padding_right, const Shape &filter_backprop_shape, float *filter_backprop_data)  [inline]
Definition at line 225 of file Conv.h.
References nnfw::cker::ConvParams::dilation_height_factor, nnfw::cker::ConvParams::dilation_width_factor, nnfw::cker::Shape::Dims(), nnfw::cker::PaddingValues::height, nnfw::cker::MatchingDim(), nnfw::cker::ConvParams::padding_type, nnfw::cker::ConvParams::padding_values, nnfw::cker::ConvParams::stride_height, nnfw::cker::ConvParams::stride_width, and nnfw::cker::PaddingValues::width.
void nnfw::cker::train::ConvInputGrad (const ConvParams &params, const Shape &incoming_shape, const float *incoming_data, const Shape &filter_shape, const float *filter_data, const int padding_bottom, const int padding_right, const Shape &grad_shape, float *grad_data)  [inline]
Definition at line 188 of file Conv.h.
References nnfw::cker::ConvParams::dilation_height_factor, nnfw::cker::ConvParams::dilation_width_factor, nnfw::cker::Shape::Dims(), nnfw::cker::PaddingValues::height, nnfw::cker::MatchingDim(), nnfw::cker::ConvParams::padding_type, nnfw::cker::ConvParams::padding_values, nnfw::cker::ConvParams::stride_height, nnfw::cker::ConvParams::stride_width, and nnfw::cker::PaddingValues::width.
template<typename T>
void nnfw::cker::train::Depad (const int32_t *padding_data, int32_t pad_rank, const Shape &input_shape, const T *input_data, const Shape &output_shape, T *output_data)  [inline]
Definition at line 50 of file Pad.h.
References nnfw::cker::Shape::DimensionsCount(), nnfw::cker::Shape::Dims(), and output_shape.
template<typename T>
void nnfw::cker::train::FullyConnectedBiasGrad (const Shape &incomming_shape, const T *incomming_data, const Shape &grad_shape, T *grad_data)  [inline]
Definition at line 31 of file FullyConnected.h.
References nnfw::cker::Shape::DimensionsCount(), nnfw::cker::Shape::Dims(), nnfw::cker::Shape::FlatSize(), and nnfw::cker::MapAsMatrixWithLastDimAsRows().
void nnfw::cker::train::GradientDescent (const Shape &output_shape, float *output_data, const Shape &grad_shape, const float *grad_data, float learning_rate)  [inline]
Definition at line 33 of file SGD.h.
References nnfw::cker::Tensor::buffer, nnfw::cker::Shape::DimensionsCount(), nnfw::cker::Shape::DimsData(), nnfw::cker::eigen_support::GetThreadPoolDevice(), output_shape, nnfw::cker::Shape::ReplaceWith(), nnfw::cker::Tensor::scalar(), and nnfw::cker::Tensor::shape.
Referenced by onert::backend::train::optimizer::SGD::applyGradient().
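A minimal sketch of a plain SGD step with this kernel, under the same assumptions as the earlier examples (include path cker/train/optimizer/SGD.h, Shape constructor, placeholder function name); the update is expected to be weights[i] -= learning_rate * grad[i], but the exact semantics should be confirmed against SGD.h.

#include <cker/train/optimizer/SGD.h> // assumed include path
#include <cstdint>

// Sketch: one vanilla gradient-descent update on a 3-element weight tensor.
void sgd_step_example()
{
  const int32_t dims[] = {3};
  nnfw::cker::Shape shape(1, dims);

  float weights[3] = {0.5f, -0.5f, 1.0f};     // updated in place
  const float grad[3] = {0.1f, -0.2f, 0.05f}; // gradients from the backward pass

  nnfw::cker::train::GradientDescent(shape, weights, shape, grad, /*learning_rate=*/0.01f);
}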
template<typename T>
T nnfw::cker::train::log_threshold ()  [inline]
void nnfw::cker::train::MaxPool2D (const PoolParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data, int *arg_max_index)  [inline]
Definition at line 36 of file MaxPool.h.
References nnfw::cker::Shape::DimensionsCount(), nnfw::cker::Shape::Dims(), nnfw::cker::PoolParams::filter_height, nnfw::cker::PoolParams::filter_width, nnfw::cker::PoolParams::float_activation_max, nnfw::cker::PoolParams::float_activation_min, nnfw::cker::PaddingValues::height, nnfw::cker::MapAsMatrixWithLastDimAsRows(), nnfw::cker::MatchingDim(), nnfw::cker::NodeOffset(), offset(), output_shape, nnfw::cker::PoolParams::padding_values, nnfw::cker::PoolParams::stride_height, nnfw::cker::PoolParams::stride_width, and nnfw::cker::PaddingValues::width.
void nnfw::cker::train::MaxPool2DGrad (const Shape &incoming_shape, const float *incoming_data, const int *arg_max_index, const Shape &grad_shape, float *grad_data)  [inline]
Definition at line 129 of file MaxPool.h.
References nnfw::cker::Shape::DimensionsCount(), nnfw::cker::Shape::FlatSize(), nnfw::cker::MapAsMatrixWithLastDimAsRows(), and nnfw::cker::MatchingDim().
template<typename T>
void nnfw::cker::train::MeanGrad (const Shape &incoming_shape, const T *incoming_data, const Shape &grad_shape, T *grad_data)
Definition at line 32 of file ReduceMean.h.
References nnfw::cker::BroadcastTo(), and nnfw::cker::MapAsMatrixWithLastDimAsRows().
Referenced by onert::backend::train::ops::MeanLayer::backward().
template<typename T>
void nnfw::cker::train::MSE (const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &output_shape, T *output_data)  [inline]
Definition at line 41 of file Loss.h.
References nnfw::cker::Shape::Dims(), nnfw::cker::FlatSizeSkipDim(), output_shape, size, and square().
Referenced by onert::backend::train::ops::LossMeanSquaredErrorLayer::forward().
template<typename T>
void nnfw::cker::train::MSEGrad (const Shape &y_pred_shape, const T *y_pred_data, const Shape &y_true_shape, const T *y_true_data, const Shape &grad_shape, T *grad_data, LossReductionType reduction_type)  [inline]
Definition at line 66 of file Loss.h.
References nnfw::cker::Shape::Dims(), nnfw::cker::FlatSizeSkipDim(), offset(), SUM, and SUM_OVER_BATCH_SIZE.
Referenced by onert::backend::train::ops::LossMeanSquaredErrorLayer::backward().
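To make the forward/backward pairing concrete, here is a sketch that evaluates the mean-squared-error loss and then its gradient with respect to the predictions; as before, the include path, the Shape constructor, the per-sample {batch} shape of the loss output, and the example function name are assumptions.

#include <cker/train/operation/Loss.h> // assumed include path
#include <cstdint>

// Sketch: MSE loss and dLoss/dy_pred for 2 samples with 2 outputs each.
void mse_example()
{
  const int32_t pred_dims[] = {2, 2};
  const int32_t loss_dims[] = {2};
  nnfw::cker::Shape pred_shape(2, pred_dims);
  nnfw::cker::Shape loss_shape(1, loss_dims); // one loss value per sample (assumed)

  const float y_pred[4] = {0.9f, 0.1f, 0.4f, 0.6f};
  const float y_true[4] = {1.0f, 0.0f, 0.0f, 1.0f};

  float loss[2]; // per-sample loss
  float grad[4]; // gradient with respect to y_pred

  nnfw::cker::train::MSE(pred_shape, y_pred, pred_shape, y_true, loss_shape, loss);
  nnfw::cker::train::MSEGrad(pred_shape, y_pred, pred_shape, y_true, pred_shape, grad,
                             nnfw::cker::train::LossReductionType::SUM_OVER_BATCH_SIZE);
}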
void nnfw::cker::train::ReLU6Grad (const Shape &output_shape, const float *output_data, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data)  [inline]
Definition at line 31 of file ReLU6.h.
References nnfw::cker::MapAsVector(), and output_shape.
Referenced by onert::backend::train::ops::backpropActivation(), and onert::backend::train::ops::ElementwiseActivationLayer::configureBackward().
void nnfw::cker::train::ReLUGrad (const Shape &output_shape, const float *output_data, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data)  [inline]
Definition at line 32 of file ReLU.h.
References nnfw::cker::MapAsVector(), and output_shape.
Referenced by onert::backend::train::ops::backpropActivation(), and onert::backend::train::ops::ElementwiseActivationLayer::configureBackward().
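ReLUGrad takes the forward output rather than the input, which is sufficient because the ReLU derivative is 1 exactly where the output is positive and 0 elsewhere. A sketch under the same include-path, Shape, and naming assumptions:

#include <cker/train/operation/ReLU.h> // assumed include path
#include <cstdint>

// Sketch: backprop through ReLU using the stored forward output.
void relu_backward_example()
{
  const int32_t dims[] = {4};
  nnfw::cker::Shape shape(1, dims);

  const float output[4] = {0.f, 2.f, 0.f, 3.f};   // ReLU forward result
  const float incoming[4] = {1.f, 1.f, 1.f, 1.f}; // gradient from the next layer

  float grad[4]; // expected to receive {0, 1, 0, 1}: gradient passes only where output > 0

  nnfw::cker::train::ReLUGrad(shape, output, shape, incoming, shape, grad);
}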
void nnfw::cker::train::SoftMaxGrad (const Shape &output_shape, const float *output_data, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data)  [inline]
Definition at line 30 of file SoftMax.h.
References nnfw::cker::Shape::DimensionsCount(), nnfw::cker::Shape::Dims(), nnfw::cker::MatchingFlatSize(), and output_shape.
Referenced by onert::backend::train::ops::SoftMaxLayer::backward().