ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::backend::trix::ops::BulkLayer Class Reference

#include <BulkLayer.h>

Collaboration diagram for onert::backend::trix::ops::BulkLayer:

Public Member Functions

 BulkLayer ()
 
 ~BulkLayer ()
 
void configure (const std::vector< const IPortableTensor * > &inputs, std::vector< IPortableTensor * > &outputs, std::string binary_path, const std::shared_ptr< DevContext > &dev_context)
 
void run () override
 
void prepare () override
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 

Detailed Description

Layer that executes a pre-compiled model binary on a TRIX device. On
configure() it stores the input/output tensor lists and the device
context, and registers the model binary (by path) with the device via
DevContext::registerModel(). On run() it packs tensor data-info and
buffer descriptors and submits them with DevContext::requestRun(),
using the first dimension of the sole input as the batch size when
there is exactly one input and one output (a temporary assumption noted
in the source). The destructor unregisters the model from the device.

Definition at line 34 of file BulkLayer.h.

Constructor & Destructor Documentation

◆ BulkLayer()

onert::backend::trix::ops::BulkLayer::BulkLayer ( )

Definition at line 30 of file BulkLayer.cc.

30 : _inputs(), _outputs(), _model_id(0), _dev_context(nullptr)
31{
32 // DO NOTHING
33}

◆ ~BulkLayer()

onert::backend::trix::ops::BulkLayer::~BulkLayer ( )

Definition at line 35 of file BulkLayer.cc.

35{ _dev_context->unRegisterModel(_model_id); }

Member Function Documentation

◆ configure()

void onert::backend::trix::ops::BulkLayer::configure ( const std::vector< const IPortableTensor * > &  inputs,
std::vector< IPortableTensor * > &  outputs,
std::string  binary_path,
const std::shared_ptr< DevContext > &  dev_context 
)

Definition at line 37 of file BulkLayer.cc.

40{
41 _inputs = inputs;
42 _outputs = outputs;
43 _dev_context = dev_context;
44 _model_id = _dev_context->registerModel(binary_path);
45}

◆ prepare()

void onert::backend::trix::ops::BulkLayer::prepare ( )
overridevirtual

Reimplemented from onert::exec::IFunction.

Definition at line 68 of file BulkLayer.cc.

69{
70 // DO NOTHING
71}

◆ run()

void onert::backend::trix::ops::BulkLayer::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 47 of file BulkLayer.cc.

48{
49 tensors_data_info in_info;
50 tensors_data_info out_info;
51 setDataInfo(_inputs, &in_info);
52 setDataInfo(_outputs, &out_info);
53
54 input_buffers input_bufs;
55 output_buffers output_bufs;
56 setBuffers(_inputs, &input_bufs);
57 setBuffers(_outputs, &output_bufs);
58
59 size_t batch_size = 1;
60 // TODO Remove this assumption
61 if (_inputs.size() == 1 && _outputs.size() == 1 && _inputs.at(0)->getShape().dim(0) > 1)
62 {
63 batch_size = _inputs.at(0)->getShape().dim(0);
64 }
65 _dev_context->requestRun(_model_id, &input_bufs, &in_info, &output_bufs, &out_info, batch_size);
66}
void setBuffers(const std::vector< T * > &tensors, generic_buffers *buf)
Set the generic_buffers object.
Definition Convert.h:69
void setDataInfo(const std::vector< T * > &tensors, tensors_data_info *info)
Set the tensors_data_info object.
Definition Convert.h:50

References onert::backend::trix::setBuffers(), and onert::backend::trix::setDataInfo().

Referenced by package.infer.session::inference().


The documentation for this class was generated from the following files: