ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::cpu::ops::AddNLayer Class Reference

#include <AddNLayer.h>

Collaboration diagram for onert::backend::cpu::ops::AddNLayer:

Public Member Functions

 AddNLayer ()
 
void configure (std::vector< const IPortableTensor * > &&inputs, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 27 of file AddNLayer.h.

Constructor & Destructor Documentation

◆ AddNLayer()

onert::backend::cpu::ops::AddNLayer::AddNLayer ( )
inline

Definition at line 30 of file AddNLayer.h.

30: _inputs(), _output(nullptr) {}

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::AddNLayer::configure ( std::vector< const IPortableTensor * > &&  inputs,
IPortableTensor *  output 
)

Definition at line 53 of file AddNLayer.cc.

54{
55 _inputs = std::move(inputs);
56 _output = output;
57}

◆ run()

void onert::backend::cpu::ops::AddNLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 59 of file AddNLayer.cc.

60{
61 size_t input_size = _inputs.size();
62 if (_output->data_type() == ir::DataType::INT32)
63 {
64 std::vector<const int32_t *> input_buffers(input_size);
65 for (size_t i = 0; i < input_size; i++)
66 {
67 input_buffers[i] = getBuffer<int32_t>(_inputs[i]);
68 }
69 AddN(getShape(_inputs[0]), input_size, input_buffers.data(), getBuffer<int32_t>(_output));
70 }
71 else if (_output->data_type() == ir::DataType::FLOAT32)
72 {
73 std::vector<const float *> input_buffers(input_size);
74 for (size_t i = 0; i < input_size; i++)
75 {
76 input_buffers[i] = getBuffer<float>(_inputs[i]);
77 }
78 AddN(getShape(_inputs[0]), input_size, input_buffers.data(), getBuffer<float>(_output));
79 }
80 else
81 {
82 throw std::runtime_error("AddN: unsupported data type");
83 }
84}
ir::DataType data_type() const override final
void AddN(const size_t flat_size, const size_t num_inputs, const T *const *input_data, T *output_data)
Definition PALAddN.h:29
nnfw::cker::Shape getShape(const IPortableTensor *tensor)

References onert::backend::IPortableTensor::data_type(), and onert::backend::cpu::ops::getShape().


The documentation for this class was generated from the following files: