ONE - On-device Neural Engine
onert::backend::cpu::ops::EinsumLayer Class Reference

#include <EinsumLayer.h>

Collaboration diagram for onert::backend::cpu::ops::EinsumLayer:

Public Member Functions

 EinsumLayer ()
 
 ~EinsumLayer ()
 
void einsumFloat32 ()
 
void configure (const std::vector< const IPortableTensor * > &inputs, std::string equation, IPortableTensor *output)
 
void run () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 
virtual void prepare ()
 

Detailed Description

Definition at line 44 of file EinsumLayer.h.
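
Going by the members documented below, EinsumLayer is the CPU-backend kernel that evaluates an Einstein-summation (einsum) expression over a list of input tensors: configure() stores the input tensors, the equation string and the output tensor, run() checks that the output is FLOAT32, and einsumFloat32() hands the equation, the input shapes and the raw buffers to nnfw::cker::Einsum, which performs the actual computation.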

Constructor & Destructor Documentation

◆ EinsumLayer()

onert::backend::cpu::ops::EinsumLayer::EinsumLayer ( )

Definition at line 30 of file EinsumLayer.cc.

EinsumLayer::EinsumLayer()
  : _inputs(), _output(nullptr), _equation(), _einsum_kernel(new nnfw::cker::Einsum())
{
  // DO NOTHING
}

◆ ~EinsumLayer()

onert::backend::cpu::ops::EinsumLayer::~EinsumLayer ( ) = default

Member Function Documentation

◆ configure()

void onert::backend::cpu::ops::EinsumLayer::configure ( const std::vector< const IPortableTensor * > & inputs,
                                                        std::string equation,
                                                        IPortableTensor * output )

Definition at line 69 of file EinsumLayer.cc.

void EinsumLayer::configure(const std::vector<const IPortableTensor *> &inputs,
                            std::string equation, IPortableTensor *output)
{
  assert(inputs.size() > 0);
  assert(output != nullptr);

  _inputs = inputs;
  _equation = equation;
  _output = output;
}
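
For orientation, a minimal usage sketch is given below. It assumes the surrounding backend has already allocated suitable FLOAT32 tensors (lhs, rhs and out are hypothetical placeholders, not part of this API), and it uses a batched matrix-multiplication equation purely as an example:

#include <memory>
#include <string>
#include <vector>

#include <EinsumLayer.h>

using onert::backend::IPortableTensor;
using onert::backend::cpu::ops::EinsumLayer;

// Hypothetical, backend-allocated tensors (placeholders for this sketch only).
extern const IPortableTensor *lhs;
extern const IPortableTensor *rhs;
extern IPortableTensor *out;

void buildAndRunEinsum()
{
  auto layer = std::make_unique<EinsumLayer>();

  // configure() only records the operands and the equation; no computation happens yet.
  std::vector<const IPortableTensor *> inputs{lhs, rhs};
  layer->configure(inputs, "bij,bjk->bik", out);

  // run() checks the output data type and dispatches to einsumFloat32().
  layer->run();
}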

◆ einsumFloat32()

void onert::backend::cpu::ops::EinsumLayer::einsumFloat32 ( )

Definition at line 38 of file EinsumLayer.cc.

void EinsumLayer::einsumFloat32()
{
  uint32_t num_inputs = _inputs.size();
  nnfw::cker::Einsum &kernel = *_einsum_kernel;

  kernel.prepare(_equation);

  std::vector<nnfw::cker::Shape> inputShapes;
  std::vector<const float *> inputFloatPtrs;

  for (uint32_t i = 0; i < num_inputs; i++)
  {
    inputShapes.emplace_back(getShape(_inputs[i]));
    inputFloatPtrs.emplace_back(getBuffer<float>(_inputs[i]));
  }

  kernel(_equation, inputShapes, inputFloatPtrs, getShape(_output), getBuffer<float>(_output));
}

References onert::backend::cpu::ops::getShape(), and nnfw::cker::Einsum::prepare().

Referenced by run().
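
As background (not part of this class), the equation string follows standard einsum notation: indices that are repeated across inputs but absent from the output are summed over. For example, "ij,jk->ik" denotes an ordinary matrix multiplication; a plain reference loop with the same semantics, shown only to illustrate what the kernel computes, would look like this:

#include <cstddef>

// Reference semantics of "ij,jk->ik" on row-major float buffers (illustration only).
void einsum_ij_jk_ik(const float *a, const float *b, float *c,
                     std::size_t I, std::size_t J, std::size_t K)
{
  for (std::size_t i = 0; i < I; ++i)
    for (std::size_t k = 0; k < K; ++k)
    {
      float acc = 0.0f;
      for (std::size_t j = 0; j < J; ++j)
        acc += a[i * J + j] * b[j * K + k]; // sum over the repeated index j
      c[i * K + k] = acc;                   // output laid out with index order "ik"
    }
}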

◆ run()

void onert::backend::cpu::ops::EinsumLayer::run ( )   [override, virtual]

Implements onert::exec::IFunction.

Definition at line 57 of file EinsumLayer.cc.

void EinsumLayer::run()
{
  if (_output->data_type() == OperandType::FLOAT32)
  {
    einsumFloat32();
  }
  else
  {
    throw std::runtime_error{"Einsum: unsupported data type"};
  }
}

References onert::backend::IPortableTensor::data_type(), and einsumFloat32().

Referenced by package.infer.session::inference().
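
Continuing the hypothetical layer object from the configure() sketch above, the only failure mode of run() surfaces as an exception when the output tensor is not FLOAT32:

// Only FLOAT32 outputs are handled; any other OperandType makes run() throw.
try
{
  layer->run();
}
catch (const std::runtime_error &e)
{
  // e.what() is "Einsum: unsupported data type" for non-FLOAT32 outputs.
}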


The documentation for this class was generated from the following files:
EinsumLayer.h
EinsumLayer.cc