ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::train::ops::ElementwiseActivationLayer Class Reference

#include <ElementwiseActivationLayer.h>

Collaboration diagram for onert::backend::train::ops::ElementwiseActivationLayer:

Public Member Functions

 ElementwiseActivationLayer ()
 
void configureBackward (const IPortableTensor *input, IPortableTensor *back_prop_input, const IPortableTensor *back_prop_output, float alpha, float beta, ElementwiseActivationType op_type)
 
void forward (bool training) override
 
void backward () override
 
- Public Member Functions inherited from onert::exec::train::ITrainableFunction
virtual ~ITrainableFunction ()=default
 
virtual std::optional< backend::train::LayerScopeTensors > registerLayerScopeTensors ()
 
- Public Member Functions inherited from onert::backend::cpu::ops::ElementwiseActivationLayer
 ElementwiseActivationLayer ()
 
void configure (const IPortableTensor *input, IPortableTensor *output, float alpha, float beta, const ElementwiseActivationType op_type)
 
void run () override
 
void PopulateLookupTable (const ElementwiseActivationType op_type)
 
void EvalUsingLookupTable (const IPortableTensor *input, IPortableTensor *output)
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Additional Inherited Members

- Protected Attributes inherited from onert::backend::cpu::ops::ElementwiseActivationLayer
const IPortableTensor * _input
 
IPortableTensor * _output
 
uint8_t _table [256]
 
std::function< void(const IPortableTensor *input, IPortableTensor *output)> _kernel
 

Detailed Description

Definition at line 39 of file ElementwiseActivationLayer.h.

Constructor & Destructor Documentation

◆ ElementwiseActivationLayer()

onert::backend::train::ops::ElementwiseActivationLayer::ElementwiseActivationLayer ( )

Definition at line 33 of file ElementwiseActivationLayer.cc.

33 : cpu::ops::ElementwiseActivationLayer()
34{
35 // DO NOTHING
36}

Member Function Documentation

◆ backward()

void onert::backend::train::ops::ElementwiseActivationLayer::backward ( )
overridevirtual

Implements onert::exec::train::ITrainableFunction.

Definition at line 94 of file ElementwiseActivationLayer.cc.

95{
96 _backward_kernel(_output, _back_prop_output, _back_prop_input);
97}

References onert::backend::cpu::ops::ElementwiseActivationLayer::_output.

◆ configureBackward()

void onert::backend::train::ops::ElementwiseActivationLayer::configureBackward ( const IPortableTensor *  input,
IPortableTensor *  back_prop_input,
const IPortableTensor *  back_prop_output,
float  alpha,
float  beta,
ElementwiseActivationType  op_type 
)

Definition at line 38 of file ElementwiseActivationLayer.cc.

43{
44 assert(input != nullptr);
45 assert(back_prop_input != nullptr);
46 assert(back_prop_output != nullptr);
47
48 _back_prop_input = back_prop_input;
49 _back_prop_output = back_prop_output;
50
51 _op_type = op_type;
52
53 switch (op_type)
54 {
55 case ElementwiseActivationType::kReLU:
56 if (input->data_type() == OperandType::FLOAT32)
57 {
58 if ((alpha == std::numeric_limits<float>::infinity() || alpha == 6.0f) && beta == 0.f)
59 {
60 auto relu_cker = [&alpha]() {
61 if (alpha == std::numeric_limits<float>::infinity())
62 return nnfw::cker::train::ReLUGrad;
63 else if (alpha == 6.0f)
64 return nnfw::cker::train::ReLU6Grad;
65 else
66 throw std::runtime_error{"no supported relu kernel"};
67 }();
68
69 _backward_kernel = [relu_cker](const IPortableTensor *output,
70 const IPortableTensor *incoming,
71 IPortableTensor *outgoing) {
72 relu_cker(getShape(output), getBuffer<float>(output), getShape(incoming),
73 getBuffer<float>(incoming), getShape(outgoing), getBuffer<float>(outgoing));
74 };
75 }
76 else
77 {
78 throw std::runtime_error(
79 "train ElementwiseActivationLayer : Unsupported ReLU activation type");
80 }
81 }
82 else
83 {
84 throw std::runtime_error("train ElementwiseActivationLayer: Unsupported datatype");
85 }
86 break;
87 default:
88 throw std::runtime_error("train ElementwiseActivationLayer: Unsupported activation type yet");
89 }
90}
void ReLUGrad(const Shape &output_shape, const float *output_data, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data)
Definition ReLU.h:32
void ReLU6Grad(const Shape &output_shape, const float *output_data, const Shape &incoming_shape, const float *incoming_data, const Shape &grad_shape, float *grad_data)
Definition ReLU6.h:31
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
Get shape of tensor.

References onert::backend::train::ops::getShape(), onert::backend::train::ops::kReLU, nnfw::cker::train::ReLU6Grad(), and nnfw::cker::train::ReLUGrad().

◆ forward()

void onert::backend::train::ops::ElementwiseActivationLayer::forward ( bool  training)
overridevirtual

The documentation for this class was generated from the following files: