ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::cpu::ops::AddNLayer Class Reference

#include <AddNLayer.h>

Collaboration diagram for onert::backend::cpu::ops::AddNLayer:

Public Member Functions

 AddNLayer ()
 
void configure (std::vector< const IPortableTensor * > &&inputs, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 33 of file AddNLayer.h.

Constructor & Destructor Documentation

◆ AddNLayer()

onert::backend::cpu::ops::AddNLayer::AddNLayer ( )
inline

Definition at line 36 of file AddNLayer.h.

36 AddNLayer() : _inputs(), _output(nullptr) {}

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::AddNLayer::configure ( std::vector< const IPortableTensor * > &&  inputs,
IPortableTensor *  output 
)

Definition at line 33 of file AddNLayer.cc.

34{
35 _inputs = std::move(inputs);
36 _output = output;
37}

◆ run()

void onert::backend::cpu::ops::AddNLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 39 of file AddNLayer.cc.

40{
41 size_t input_size = _inputs.size();
42 if (_output->data_type() == ir::DataType::INT32)
43 {
44 std::vector<const int32_t *> input_buffers(input_size);
45 for (size_t i = 0; i < input_size; i++)
46 {
47 input_buffers[i] = getBuffer<int32_t>(_inputs[i]);
48 }
49 AddN(getShape(_inputs[0]), input_size, input_buffers.data(), getBuffer<int32_t>(_output));
50 }
51 else if (_output->data_type() == ir::DataType::FLOAT32)
52 {
53 std::vector<const float *> input_buffers(input_size);
54 for (size_t i = 0; i < input_size; i++)
55 {
56 input_buffers[i] = getBuffer<float>(_inputs[i]);
57 }
58 AddN(getShape(_inputs[0]), input_size, input_buffers.data(), getBuffer<float>(_output));
59 }
60 else
61 {
62 throw std::runtime_error("AddN: unsupported data type");
63 }
64}
ir::DataType data_type() const override final
void AddN(const Shape &input_shape, const size_t num_inputs, const T **input_data, T *output_data)
Definition AddN.h:29
nnfw::cker::Shape getShape(const IPortableTensor *tensor)

References onert::backend::IPortableTensor::data_type(), and onert::backend::cpu::ops::getShape().

Referenced by package.infer.session::inference().


The documentation for this class was generated from the following files: