ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::exec::SingleModelExecutors Class Reference

Class to gather executor set for single model NN package. More...

#include <SingleModelExecutors.h>

Collaboration diagram for onert::exec::SingleModelExecutors:

Public Member Functions

 SingleModelExecutors (void)=default
 Construct a new SingleModelExecutors object.
 
 SingleModelExecutors (const SingleModelExecutors &)=delete
 
 SingleModelExecutors (SingleModelExecutors &&)=default
 
 ~SingleModelExecutors ()=default
 Destroy the SingleModelExecutors object.
 
void emplace (const ir::ModelIndex &model_index, const ir::SubgraphIndex &subg_index, std::unique_ptr< IExecutor > exec) override
 Insert executor in executor set.
 
IExecutor * at (const ir::ModelIndex &model_index, const ir::SubgraphIndex &subg_index) const override
 Return executor of index.
 
uint32_t inputSize () const override
 Return executor set's number of input.
 
uint32_t outputSize () const override
 Return executor set's number of output.
 
const ir::OperandInfo & inputInfo (const ir::IOIndex &index) const override
 Return NN package input tensor info.
 
const ir::OperandInfo & outputInfo (const ir::IOIndex &index) const override
 Return NN package output tensor info.
 
const void * outputBuffer (const ir::IOIndex &index) const final
 Return NN package output buffer.
 
const backend::IPortableTensor * outputTensor (const ir::IOIndex &index) const final
 Return NN package output tensor.
 
void execute (ExecutionContext &ctx) override
 Execute NN package executor set.
 
- Public Member Functions inherited from onert::exec::IExecutors
virtual ~IExecutors ()=default
 Virtual IExecutors destructor.
 
virtual IExecutor * entryExecutor () const
 

Detailed Description

Class to gather executor set for single model NN package.

Definition at line 29 of file SingleModelExecutors.h.

Constructor & Destructor Documentation

◆ SingleModelExecutors() [1/3]

onert::exec::SingleModelExecutors::SingleModelExecutors ( void  )
default

Construct a new SingleModelExecutors object.

◆ SingleModelExecutors() [2/3]

onert::exec::SingleModelExecutors::SingleModelExecutors ( const SingleModelExecutors & )
delete

◆ SingleModelExecutors() [3/3]

onert::exec::SingleModelExecutors::SingleModelExecutors ( SingleModelExecutors &&  )
default

◆ ~SingleModelExecutors()

onert::exec::SingleModelExecutors::~SingleModelExecutors ( )
default

Destroy the SingleModelExecutors object.

Member Function Documentation

◆ at()

IExecutor * onert::exec::SingleModelExecutors::at ( const ir::ModelIndex & model_index,
const ir::SubgraphIndex & subg_index 
) const
overridevirtual

Return executor of index.

Parameters
[in] model_index Model index
[in] subg_index Subgraph index
Returns
Executor

Implements onert::exec::IExecutors.

Definition at line 33 of file SingleModelExecutors.cc.

35{
36 return _executors.at(subg_index).get();
37}

◆ emplace()

void onert::exec::SingleModelExecutors::emplace ( const ir::ModelIndex & model_index,
const ir::SubgraphIndex & subg_index,
std::unique_ptr< IExecutor > exec 
)
overridevirtual

Insert executor in executor set.

Parameters
[in] model_index Model index
[in] subg_index Subgraph index
[in] exec Executor to insert

Implements onert::exec::IExecutors.

Definition at line 27 of file SingleModelExecutors.cc.

29{
30 _executors.emplace(subg_index, std::move(exec));
31}

◆ execute()

void onert::exec::SingleModelExecutors::execute ( ExecutionContext & ctx)
overridevirtual

Execute NN package executor set.

Parameters
[in,out] ctx Execution context. It reflects the execution result (e.g. output shape inference)

Implements onert::exec::IExecutors.

Definition at line 63 of file SingleModelExecutors.cc.

64{
65 // UserTensor for Input/Output
66 std::vector<std::unique_ptr<backend::builtin::UserTensor>> tensorpool;
67
68 // Input/Output Tensor vector for executor
69 std::vector<backend::IPortableTensor *> inputs(ctx.desc.inputs.size());
70 std::vector<backend::IPortableTensor *> outputs(ctx.desc.outputs.size());
71
72 // Prepare UserTensor for input
73 for (uint32_t i = 0; i < inputs.size(); i++)
74 {
75 auto &desc = ctx.desc.inputs[i];
76
77 // Input is optional if buffer is nullptr, and optional input's size is 0
78 if (desc.buffer == nullptr && (desc.size != 0 || desc.info.total_size() != 0))
79 throw std::runtime_error{"Input " + std::to_string(i) + "'s buffer is not set."};
80
81 tensorpool.emplace_back(std::make_unique<backend::builtin::UserTensor>(
82 desc.info, const_cast<uint8_t *>(static_cast<const uint8_t *>(desc.buffer)), desc.size));
83
84 inputs[i] = tensorpool.back().get();
85 }
86
87 // Prepare UserTensor for output
88 for (uint32_t i = 0; i < outputs.size(); i++)
89 {
90 auto &desc = ctx.desc.outputs[i];
91 const auto output_io_tensor =
92 dynamic_cast<const backend::builtin::IOTensor *>(outputTensor(ir::IOIndex{i}));
93 if (!output_io_tensor)
94 throw std::runtime_error{"Output tensor must be IOTensor"};
95 bool skip_set_output = output_io_tensor->hasBackendTensor();
96
97 // If buffer is nullptr, output is optional or internally allocated buffer,
98 // and optional output's size is 0
99 if (desc.buffer == nullptr && (desc.size != 0 || desc.info.total_size() != 0) &&
100 !skip_set_output)
101 throw std::runtime_error{"Output " + std::to_string(i) + "'s buffer is not set."};
102
103 tensorpool.emplace_back(std::make_unique<backend::builtin::UserTensor>(
104 desc.info, static_cast<uint8_t *>(desc.buffer), desc.size));
105 outputs[i] = tensorpool.back().get();
106 }
107
108 // Executor
109 entryExecutor()->execute(inputs, outputs, ctx.options);
110
111 // Get dynamic shape inference result
112 for (uint32_t i = 0; i < outputs.size(); i++)
113 {
114 const auto output_io_tensor = outputTensor(ir::IOIndex{i});
115 ctx.desc.outputs[i].info.shape(output_io_tensor->get_info().shape());
116 }
117}
virtual IExecutor * entryExecutor() const
Definition IExecutors.h:58
const backend::IPortableTensor * outputTensor(const ir::IOIndex &index) const final
Return NN package output tensor.
::onert::util::Index< uint32_t, IOIndexTag > IOIndex
Definition Index.h:36
virtual void execute(const std::vector< backend::IPortableTensor * > &inputs, const std::vector< backend::IPortableTensor * > &outputs, const ExecutionOptions &options)=0
Execute with given input/output tensors.

References onert::exec::ExecutionContext::desc, onert::exec::IExecutors::entryExecutor(), onert::exec::IExecutor::execute(), onert::backend::builtin::IOTensor::hasBackendTensor(), onert::exec::IODescription::inputs, onert::exec::ExecutionContext::options, onert::exec::IODescription::outputs, and outputTensor().

◆ inputInfo()

const ir::OperandInfo & onert::exec::SingleModelExecutors::inputInfo ( const ir::IOIndex & index) const
overridevirtual

Return NN package input tensor info.

Parameters
[in] index Input index
Returns
Tensor info

Implements onert::exec::IExecutors.

Definition at line 43 of file SingleModelExecutors.cc.

44{
45 return entryExecutor()->inputInfo(index.value());
46}
loco::GraphInputIndex index(const TFPlaceholder *node)
Definition TFNode.cpp:54
virtual const ir::OperandInfo & inputInfo(uint32_t index) const =0
Get input info at index.

References onert::exec::IExecutors::entryExecutor(), and onert::exec::IExecutor::inputInfo().

◆ inputSize()

uint32_t onert::exec::SingleModelExecutors::inputSize ( ) const
overridevirtual

Return executor set's number of input.

Returns
Number of input

Implements onert::exec::IExecutors.

Definition at line 39 of file SingleModelExecutors.cc.

39{ return entryExecutor()->inputSize(); }
virtual uint32_t inputSize() const =0
Get input size.

References onert::exec::IExecutors::entryExecutor(), and onert::exec::IExecutor::inputSize().

◆ outputBuffer()

const void * onert::exec::SingleModelExecutors::outputBuffer ( const ir::IOIndex & index) const
finalvirtual

Return NN package output buffer.

Parameters
[in] index Output index
Returns
Buffer of output

Implements onert::exec::IExecutors.

Definition at line 53 of file SingleModelExecutors.cc.

54{
55 return static_cast<const void *>(entryExecutor()->outputBuffer(index.value()));
56}
virtual const uint8_t * outputBuffer(uint32_t index) const =0
Get output buffer at index.

References onert::exec::IExecutors::entryExecutor(), and onert::exec::IExecutor::outputBuffer().

◆ outputInfo()

const ir::OperandInfo & onert::exec::SingleModelExecutors::outputInfo ( const ir::IOIndex & index) const
overridevirtual

Return NN package output tensor info.

Parameters
[in] index Output index
Returns
Tensor info

Implements onert::exec::IExecutors.

Definition at line 48 of file SingleModelExecutors.cc.

49{
50 return entryExecutor()->outputInfo(index.value());
51}
virtual const ir::OperandInfo & outputInfo(uint32_t index) const =0
Get output info at index.

References onert::exec::IExecutors::entryExecutor(), and onert::exec::IExecutor::outputInfo().

◆ outputSize()

uint32_t onert::exec::SingleModelExecutors::outputSize ( ) const
overridevirtual

Return executor set's number of output.

Returns
Number of output

Implements onert::exec::IExecutors.

Definition at line 41 of file SingleModelExecutors.cc.

41{ return entryExecutor()->outputSize(); }
virtual uint32_t outputSize() const =0
Get output size.

References onert::exec::IExecutors::entryExecutor(), and onert::exec::IExecutor::outputSize().

◆ outputTensor()

const backend::IPortableTensor * onert::exec::SingleModelExecutors::outputTensor ( const ir::IOIndex & index) const
finalvirtual

Return NN package output tensor.

Parameters
[in] index Output index
Returns
Tensor of output

Implements onert::exec::IExecutors.

Definition at line 58 of file SingleModelExecutors.cc.

59{
60 return entryExecutor()->outputTensor(index.value());
61}
virtual const backend::IPortableTensor * outputTensor(uint32_t index) const =0
Get output tensor at index.

References onert::exec::IExecutors::entryExecutor(), and onert::exec::IExecutor::outputTensor().

Referenced by execute().


The documentation for this class was generated from the following files: