ONE - On-device Neural Engine
onert::backend::train::ops Namespace Reference

Data Structures

class  BackPropAccumulator
 
class  BackPropInitializer
 
class  BinaryArithmeticLayer
 
class  ConvolutionLayer
 
class  DepthwiseConvolutionLayer
 
class  ElementwiseActivationLayer
 
class  FullyConnectedLayer
 
class  GradientApplier
 
class  LossCategoricalCrossentropyLayer
 
class  LossLayer
 
class  LossMeanSquaredErrorLayer
 
class  MeanLayer
 
class  PadLayer
 
class  PoolLayer
 
class  ReshapeLayer
 
class  SoftMaxLayer
 
class  TrainingKernelRegistry
 

Typedefs

using OperandType = onert::ir::DataType
 

Enumerations

enum class  ArithmeticType { kAdd , kSub , kMul , kDiv }
 
enum class  ElementwiseActivationType { kReLU }
 
enum class  LossType { kMSE }
 
enum class  PoolType { kMax , kAvg }
 

Functions

nnfw::cker::Shape getShape (const IPortableTensor *tensor)
 Get shape of tensor.
 
const IPortableTensor * backpropActivation (const ir::Activation &activation, const IPortableTensor *output, const IPortableTensor *input_backprop, IPortableTensor *output_backprop)
 Backpropagate activation.
 
void biasGrad (const IPortableTensor *input_backprop, IPortableTensor *bias_grad)
 Backpropagate bias.
 
nnfw::cker::train::LossReductionType convertLossReductionType (ir::train::LossReductionType type)
 Convert loss reduction type.
 

Typedef Documentation

◆ OperandType

using OperandType = onert::ir::DataType

Enumeration Type Documentation

◆ ArithmeticType

Enumerator
kAdd 
kSub 
kMul 
kDiv 

Definition at line 35 of file BinaryArithmeticLayer.h.

enum class ArithmeticType
{
  kAdd,
  kSub,
  kMul,
  kDiv,
};

◆ ElementwiseActivationType

Enumerator
kReLU 

Definition at line 34 of file ElementwiseActivationLayer.h.

enum class ElementwiseActivationType
{
  kReLU,
};

◆ LossType

Enumerator
kMSE 

Definition at line 35 of file LossLayer.h.

◆ PoolType

Enumerator
kMax 
kAvg 

Definition at line 45 of file PoolLayer.h.

enum class PoolType
{
  kMax,
  kAvg,
};

Function Documentation

◆ backpropActivation()

const IPortableTensor * onert::backend::train::ops::backpropActivation (const ir::Activation &activation,
  const IPortableTensor *output,
  const IPortableTensor *input_backprop,
  IPortableTensor *output_backprop)

Backpropagate activation.

          -- forward direction -->

  [ current layer ]  ---  [ next layer ]
  [   op   |   act  ]

          <-- backward direction --
Parameters
  activation       activation of current layer
  output           forward direction's output of current layer
  input_backprop   backward direction's output of next layer; in other words, the incoming gradient to the current layer
  output_backprop  backward direction's output of the activation; in other words, the outgoing gradient of the current layer's activation. If activation is NONE, this param can be nullptr

Returns
  tensor that holds the backpropagation result of the activation. If activation is NONE, input_backprop is returned as-is

Definition at line 50 of file OperationUtils.cc.

{
  assert(output != nullptr);
  assert(input_backprop != nullptr);

  // handle NONE - just propagate incoming gradient
  if (activation == ir::Activation::NONE)
  {
    return input_backprop;
  }

  assert(output_backprop != nullptr);

  // handle other activation
  switch (activation)
  {
    case ir::Activation::RELU:
      nnfw::cker::train::ReLUGrad(getShape(output), getBuffer<float>(output),
                                  getShape(input_backprop), getBuffer<float>(input_backprop),
                                  getShape(output_backprop), getBuffer<float>(output_backprop));
      break;
    case ir::Activation::RELU6:
      nnfw::cker::train::ReLU6Grad(getShape(output), getBuffer<float>(output),
                                   getShape(input_backprop), getBuffer<float>(input_backprop),
                                   getShape(output_backprop), getBuffer<float>(output_backprop));
      break;
    // TODO: Add other activation backpropagation here
    default:
      throw std::runtime_error("Unsupported activation type yet");
  }
  return output_backprop;
}

References getShape(), onert::ir::NONE, onert::ir::RELU, onert::ir::RELU6, nnfw::cker::train::ReLU6Grad(), and nnfw::cker::train::ReLUGrad().

Referenced by onert::backend::train::ops::BinaryArithmeticLayer::backward().
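
For intuition, here is a minimal standalone sketch of the gradient rule that nnfw::cker::train::ReLUGrad applies: the incoming gradient passes through wherever the forward output was positive and is zeroed elsewhere. The function name reluGrad and the flat std::vector buffers are illustrative assumptions; the real kernel operates on nnfw::cker::Shape/buffer pairs.

#include <cassert>
#include <cstddef>
#include <vector>

// Sketch of ReLU backprop on flat buffers (illustration, not the cker API):
// grad[i] = incoming_grad[i] where the forward output was positive, else 0.
std::vector<float> reluGrad(const std::vector<float> &output,        // forward output y
                            const std::vector<float> &incoming_grad) // dL/dy
{
  assert(output.size() == incoming_grad.size());
  std::vector<float> grad(output.size());
  for (std::size_t i = 0; i < output.size(); ++i)
    grad[i] = output[i] > 0.0f ? incoming_grad[i] : 0.0f;
  return grad;
}

ReLU6Grad follows the same pattern, except the gradient only passes through where the forward output lies strictly between 0 and 6.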

◆ biasGrad()

void onert::backend::train::ops::biasGrad (const IPortableTensor *input_backprop,
  IPortableTensor *bias_grad)

Backpropagate bias.

Parameters
  input_backprop  backward direction's output of next layer; in other words, the incoming gradient to the current layer
  bias_grad       gradient tensor of bias

Definition at line 86 of file OperationUtils.cc.

{
  assert(bias_grad);

  nnfw::cker::Shape input_backprop_shape = getShape(input_backprop);
  float *input_backprop_buffer = reinterpret_cast<float *>(input_backprop->buffer());

  nnfw::cker::Shape bias_grad_shape = getShape(bias_grad);
  float *bias_grad_buffer = getBuffer<float>(bias_grad);

  nnfw::cker::functor::biasReductionHelper(input_backprop_buffer, input_backprop_shape,
                                           bias_grad_buffer, bias_grad_shape);
}

References nnfw::cker::functor::biasReductionHelper(), onert::backend::ITensor::buffer(), and getShape().
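
As a rough illustration of what the bias reduction computes, assume input_backprop is laid out as [rows x channels] with the bias applied per channel; the function name and layout here are assumptions for the sketch, not the actual biasReductionHelper interface.

#include <cstddef>
#include <vector>

// Sketch: the bias gradient for each channel is the sum of the incoming
// gradient over all rows (batch and spatial positions), since the bias is
// added to every element of its channel in the forward pass.
void biasGradSketch(const std::vector<float> &input_backprop, std::size_t rows,
                    std::size_t channels, std::vector<float> &bias_grad)
{
  bias_grad.assign(channels, 0.0f);
  for (std::size_t r = 0; r < rows; ++r)
    for (std::size_t c = 0; c < channels; ++c)
      bias_grad[c] += input_backprop[r * channels + c];
}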

◆ convertLossReductionType()

nnfw::cker::train::LossReductionType onert::backend::train::ops::convertLossReductionType (ir::train::LossReductionType type)

Convert loss reduction type.

Parameters
  type  loss reduction type defined in ir::train::LossReductionType

Returns
  corresponding type defined in cker::train::LossReductionType

Definition at line 100 of file OperationUtils.cc.

{
  switch (type)
  {
    case ir::train::LossReductionType::SumOverBatchSize:
      return nnfw::cker::train::LossReductionType::SUM_OVER_BATCH_SIZE;
    case ir::train::LossReductionType::Sum:
      return nnfw::cker::train::LossReductionType::SUM;
    default:
      throw std::runtime_error("Unsupported LossReductionType");
  }
}

References nnfw::cker::train::SUM, onert::ir::train::Sum, nnfw::cker::train::SUM_OVER_BATCH_SIZE, and onert::ir::train::SumOverBatchSize.

Referenced by onert::backend::train::ops::LossCategoricalCrossentropyLayer::backward(), and onert::backend::train::ops::LossMeanSquaredErrorLayer::backward().
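
For reference, the two reduction semantics differ only in whether the summed per-sample loss is divided by the batch size. A minimal sketch under that assumption (local names, not the cker API):

#include <cstddef>
#include <vector>

// Sum: add up the per-sample losses. SumOverBatchSize: divide that sum by
// the batch size, i.e. take the batch mean. Illustration only.
float reduceLoss(const std::vector<float> &per_sample_loss, bool sum_over_batch_size)
{
  float sum = 0.0f;
  for (float loss : per_sample_loss)
    sum += loss;
  return sum_over_batch_size ? sum / static_cast<float>(per_sample_loss.size()) : sum;
}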

◆ getShape()

nnfw::cker::Shape onert::backend::train::ops::getShape (const IPortableTensor *tensor)