ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::cpu::ops::L2NormLayer Class Reference

#include <L2NormLayer.h>

Collaboration diagram for onert::backend::cpu::ops::L2NormLayer:

Public Member Functions

 L2NormLayer ()
 
void configure (const IPortableTensor *_input, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 32 of file L2NormLayer.h.

Constructor & Destructor Documentation

◆ L2NormLayer()

onert::backend::cpu::ops::L2NormLayer::L2NormLayer ( )
inline

Definition at line 35 of file L2NormLayer.h.

35 L2NormLayer() : _input(nullptr), _output(nullptr)
36 {
37 // Nothing
38 }

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::L2NormLayer::configure ( const IPortableTensor * _input,
IPortableTensor * output 
)

Definition at line 33 of file L2NormLayer.cc.

34{
35 assert(input != nullptr);
36 assert(output != nullptr);
37
38 _input = input;
39 _output = output;
40}

◆ run()

void onert::backend::cpu::ops::L2NormLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 42 of file L2NormLayer.cc.

43{
44 switch (_input->data_type())
45 {
46 case OperandType::FLOAT32:
47 nnfw::cker::L2NormalizeFloat32(getShape(_input), getBuffer<float>(_input), getShape(_output),
48 getBuffer<float>(_output));
49 break;
50
51 case OperandType::QUANT_UINT8_ASYMM:
52 {
53 nnfw::cker::L2NormParams params;
54 assert(_input->data_zero_point() == 128);
55 params.input_zero_point = _input->data_zero_point();
56 nnfw::cker::L2NormalizeQuant8(params, getShape(_input), getBuffer<uint8_t>(_input),
57 getShape(_output), getBuffer<uint8_t>(_output));
58 }
59 break;
60
61 default:
62 throw std::runtime_error{"L2Norm: Unsupported data type"};
63 }
64}
int32_t data_zero_point() const override final
ir::DataType data_type() const override final
void L2NormalizeFloat32(const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data)
Definition L2Normalize.h:30
void L2NormalizeQuant8(L2NormParams &params, const Shape &input_shape, const uint8_t *input_data, const Shape &output_shape, uint8_t *output_data)
Definition L2Normalize.h:56
nnfw::cker::Shape getShape(const IPortableTensor *tensor)

References onert::backend::IPortableTensor::data_type(), onert::backend::IPortableTensor::data_zero_point(), onert::backend::cpu::ops::getShape(), nnfw::cker::L2NormParams::input_zero_point, nnfw::cker::L2NormalizeFloat32(), and nnfw::cker::L2NormalizeQuant8().

Referenced by package.infer.session::inference().


The documentation for this class was generated from the following files: