ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::exec::MultiModelExecutors Class Reference

Class to gather executors. More...

#include <MultiModelExecutors.h>

Collaboration diagram for onert::exec::MultiModelExecutors:

Public Member Functions

 MultiModelExecutors (void)=delete
 
 MultiModelExecutors (std::unique_ptr< ir::ModelEdges > model_edges)
 
 MultiModelExecutors (const MultiModelExecutors &)=delete
 
 MultiModelExecutors (MultiModelExecutors &&)=default
 
 ~MultiModelExecutors ()=default
 
void emplace (const ir::ModelIndex &model_index, const ir::SubgraphIndex &subg_index, std::unique_ptr< IExecutor > exec) override
 Insert executor in executor set.
 
IExecutor * at (const ir::ModelIndex &model_index, const ir::SubgraphIndex &subg_index) const override
 Return executor of index.
 
uint32_t inputSize () const override
 Return executor set's number of input.
 
uint32_t outputSize () const override
 Return executor set's number of output.
 
const ir::OperandInfo & inputInfo (const ir::IOIndex &index) const override
 Return NN package input tensor info.
 
const ir::OperandInfo & outputInfo (const ir::IOIndex &index) const override
 Return NN package output tensor info.
 
const void * outputBuffer (const ir::IOIndex &index) const final
 Return NN package output buffer.
 
const backend::IPortableTensor * outputTensor (const ir::IOIndex &index) const final
 Return NN package output tensor.
 
void execute (ExecutionContext &ctx) override
 Execute NN package executor set.
 
- Public Member Functions inherited from onert::exec::IExecutors
virtual ~IExecutors ()=default
 Virtual IExecutors destructor.
 
virtual IExecutor * entryExecutor () const
 

Detailed Description

Class to gather executors.

Definition at line 47 of file MultiModelExecutors.h.

Constructor & Destructor Documentation

◆ MultiModelExecutors() [1/4]

onert::exec::MultiModelExecutors::MultiModelExecutors ( void  )
delete

◆ MultiModelExecutors() [2/4]

onert::exec::MultiModelExecutors::MultiModelExecutors ( std::unique_ptr< ir::ModelEdges > model_edges)
inline

Definition at line 51 of file MultiModelExecutors.h.

52 : _executors{}, _model_edges{std::move(model_edges)}, _edge_tensors{},
53 _is_created_edge_tensors{false}, _pkg_input_tensors{}, _pkg_output_tensors{}
54 {
55 for (const auto &edge : _model_edges->edges)
56 {
57 _edge_map[edge.from].emplace_back(edge.to);
58 }
59 }

◆ MultiModelExecutors() [3/4]

onert::exec::MultiModelExecutors::MultiModelExecutors ( const MultiModelExecutors & )
delete

◆ MultiModelExecutors() [4/4]

onert::exec::MultiModelExecutors::MultiModelExecutors ( MultiModelExecutors &&  )
default

◆ ~MultiModelExecutors()

onert::exec::MultiModelExecutors::~MultiModelExecutors ( )
default

Member Function Documentation

◆ at()

IExecutor * onert::exec::MultiModelExecutors::at ( const ir::ModelIndex & model_index,
const ir::SubgraphIndex & subg_index 
) const
overridevirtual

Return executor of index.

Parameters
[in] model_index — Model index
[in] subg_index — Subgraph index
Returns
Executor

Implements onert::exec::IExecutors.

Definition at line 64 of file MultiModelExecutors.cc.

66{
67 return _executors.at(std::make_pair(model_index, subg_index)).get();
68}

Referenced by execute(), inputInfo(), outputBuffer(), outputInfo(), and outputTensor().

◆ emplace()

void onert::exec::MultiModelExecutors::emplace ( const ir::ModelIndex & model_index,
const ir::SubgraphIndex & subg_index,
std::unique_ptr< IExecutor > exec 
)
overridevirtual

Insert executor in executor set.

Parameters
[in] model_index — Model index
[in] subg_index — Subgraph index
[in] exec — Executor to insert

Implements onert::exec::IExecutors.

Definition at line 57 of file MultiModelExecutors.cc.

60{
61 _executors.emplace(std::make_pair(model_index, subg_index), std::move(exec));
62}

◆ execute()

void onert::exec::MultiModelExecutors::execute ( ExecutionContext & ctx)
overridevirtual

Execute NN package executor set.

Parameters
[in,out] ctx — Execution context. It reflects execution result (ex. output shape inference)

Implements onert::exec::IExecutors.

Definition at line 229 of file MultiModelExecutors.cc.

230{
231 auto &desc = ctx.desc;
232
233 // Check supported multi model package
234 checkSupportedMultimodel();
235
236 // TODO Move creating edge tensors in compilation stage
237 createEdgeTensors();
238
239 // TODO Create IOTensors only once and recreate them only if nnpkg info changes
240 CreatePkgIOTensors(desc);
241
242 // TODO Find better way to schedule order of executors
243 auto const model_count = modelCount();
244
245 auto find_from = [&](const ir::ModelIndex &model_index, const ir::SubgraphIndex &subg_index,
246 const ir::IOIndex &io_index) {
247 for (const auto &edge : _model_edges->edges)
248 {
249 if ((std::get<ir::ModelIndex>(edge.to) == model_index) &&
250 (std::get<ir::SubgraphIndex>(edge.to) == subg_index) &&
251 (std::get<ir::IOIndex>(edge.to) == io_index))
252 return edge.from;
253 }
254
255 throw std::runtime_error{"Cannot find edge for model input"};
256 };
257
258 // Execute each model
259 // NOTE May be better to use vector instead of unordered_map for _executors
260 for (auto model_index = ir::ModelIndex{0}; model_index.value() < model_count; model_index++)
261 {
262 // Find executor
263 auto executor = at(model_index, ir::SubgraphIndex{0});
264
265 // Set IOTensors
266 // TODO Set internal IOTensors only once
267 std::vector<backend::IPortableTensor *> inputs_inter;
268 std::vector<backend::IPortableTensor *> outputs_inter;
269 auto const input_size = executor->inputSize();
270 auto const output_size = executor->outputSize();
271 inputs_inter.resize(input_size);
272 outputs_inter.resize(output_size);
273
274 // Set inputs of executor
275 // TODO Create layer to allocate/deallocate buffers of EdgeTensor for each executor
276 for (uint32_t i = 0; i < input_size; i++)
277 {
278 const auto input_pkg_index = find_input_index(_model_edges->pkg_inputs, model_index,
279 ir::SubgraphIndex{0}, ir::IOIndex{i});
280 const auto input_io_desc = ir::IODesc{model_index, ir::SubgraphIndex{0}, ir::IOIndex{i}};
281 if (input_pkg_index != -1)
282 {
283 inputs_inter[i] = _pkg_input_tensors[input_io_desc].get();
284 }
285 else
286 {
287 auto from_iodesc = find_from(model_index, ir::SubgraphIndex{0}, ir::IOIndex{i});
288
289 // Supported only sequential execution of models
290 assert(std::get<ir::ModelIndex>(from_iodesc).value() < model_index.value());
291 assert(std::get<ir::SubgraphIndex>(from_iodesc).value() == 0);
292 inputs_inter[i] = _edge_tensors.at(from_iodesc).get();
293 assert(inputs_inter[i]->buffer() != nullptr);
294 }
295 }
296
297 // Set outputs of executor
298 for (uint32_t i = 0; i < output_size; i++)
299 {
300 const auto output_pkg_index = find_output_index(_model_edges->pkg_outputs, model_index,
301 ir::SubgraphIndex{0}, ir::IOIndex{i});
302 const auto output_io_desc = ir::IODesc{model_index, ir::SubgraphIndex{0}, ir::IOIndex{i}};
303 if (output_pkg_index != -1)
304 {
305 outputs_inter[i] = _pkg_output_tensors[output_io_desc].get();
306 }
307 else
308 {
309 // Allocate buffer of `from` tensors
310 const auto from_iodesc = ir::IODesc{model_index, ir::SubgraphIndex{0}, ir::IOIndex{i}};
311 _edge_tensors[from_iodesc]->allocate_buffer();
312 outputs_inter[i] = _edge_tensors[from_iodesc].get();
313
314 // Increase reference count of `from` tensors for edges
315 for (uint32_t i = 0; i < _edge_map[from_iodesc].size(); i++)
316 _edge_tensors[from_iodesc]->increase_ref();
317 }
318 }
319
320 executor->execute(inputs_inter, outputs_inter, ctx.options);
321
322 // Release input buffers that are no longer needed
323 for (uint32_t i = 0; i < input_size; i++)
324 {
325 const auto input_pkg_index = find_input_index(_model_edges->pkg_inputs, model_index,
326 ir::SubgraphIndex{0}, ir::IOIndex{i});
327 if (input_pkg_index == -1)
328 {
329 // Decrease reference count of `from` tensor if input tensor is the `from` tensor
330 const auto from_iodesc = find_from(model_index, ir::SubgraphIndex{0}, ir::IOIndex{i});
331 _edge_tensors[from_iodesc]->decrease_ref();
332 }
333 }
334
335 // Get dynamic shape inference result
336 for (uint32_t i = 0; i < output_size; i++)
337 {
338 const auto output_pkg_index = find_output_index(_model_edges->pkg_outputs, model_index,
339 ir::SubgraphIndex{0}, ir::IOIndex{i});
340
341 if (output_pkg_index != -1)
342 {
343 const auto output_io_tensor =
344 dynamic_cast<const backend::builtin::IOTensor *>(outputTensor(ir::IOIndex{i}));
345 if (!output_io_tensor)
346 throw std::runtime_error{"Output tensor must be IOTensor"};
347
348 ctx.desc.outputs[output_pkg_index].info.shape(output_io_tensor->get_info().shape());
349 }
350 }
351 }
352}
Tensor object that indirects to the tensor it is pointing to.
Definition IOTensor.h:46
IExecutor * at(const ir::ModelIndex &model_index, const ir::SubgraphIndex &subg_index) const override
Return executor of index.
const backend::IPortableTensor * outputTensor(const ir::IOIndex &index) const final
Return NN package output tensor.
T value() const
Return underlying value.
Definition Index.h:137
std::tuple< ModelIndex, SubgraphIndex, IOIndex > IODesc
Definition NNPkg.h:30
virtual uint32_t inputSize() const =0
Get input size.

References at(), onert::exec::ExecutionContext::desc, onert::exec::IExecutor::inputSize(), onert::exec::ExecutionContext::options, onert::exec::IODescription::outputs, outputTensor(), and onert::util::Index< T, DummyTag >::value().

◆ inputInfo()

const ir::OperandInfo & onert::exec::MultiModelExecutors::inputInfo ( const ir::IOIndex & index) const
overridevirtual

Return NN package input tensor info.

Parameters
[in] index — Input index
Returns
Tensor info

Implements onert::exec::IExecutors.

Definition at line 74 of file MultiModelExecutors.cc.

75{
76 auto const [model_index, subg_index, io_index] = _model_edges->pkg_inputs[index.value()];
77 auto const executor = at(model_index, subg_index);
78 return executor->inputInfo(io_index.value());
79}
loco::GraphInputIndex index(const TFPlaceholder *node)
Definition TFNode.cpp:54

References at(), and onert::util::Index< T, DummyTag >::value().

◆ inputSize()

uint32_t onert::exec::MultiModelExecutors::inputSize ( ) const
overridevirtual

Return executor set's number of input.

Returns
Number of input

Implements onert::exec::IExecutors.

Definition at line 70 of file MultiModelExecutors.cc.

70{ return _model_edges->pkg_inputs.size(); }

◆ outputBuffer()

const void * onert::exec::MultiModelExecutors::outputBuffer ( const ir::IOIndex & index) const
finalvirtual

Return NN package output buffer.

Parameters
[in] index — Output index
Returns
Buffer of output

Implements onert::exec::IExecutors.

Definition at line 88 of file MultiModelExecutors.cc.

89{
90 auto const [model_index, subg_index, io_index] = _model_edges->pkg_outputs[index.value()];
91 auto const executor = at(model_index, subg_index);
92 return static_cast<const void *>(executor->outputBuffer(io_index.value()));
93}

References at(), and onert::util::Index< T, DummyTag >::value().

◆ outputInfo()

const ir::OperandInfo & onert::exec::MultiModelExecutors::outputInfo ( const ir::IOIndex & index) const
overridevirtual

Return NN package output tensor info.

Parameters
[in] index — Output index
Returns
Tensor info

Implements onert::exec::IExecutors.

Definition at line 81 of file MultiModelExecutors.cc.

82{
83 auto const [model_index, subg_index, io_index] = _model_edges->pkg_outputs[index.value()];
84 auto const executor = at(model_index, subg_index);
85 return executor->outputInfo(io_index.value());
86}

References at(), and onert::util::Index< T, DummyTag >::value().

◆ outputSize()

uint32_t onert::exec::MultiModelExecutors::outputSize ( ) const
overridevirtual

Return executor set's number of output.

Returns
Number of output

Implements onert::exec::IExecutors.

Definition at line 72 of file MultiModelExecutors.cc.

72{ return _model_edges->pkg_outputs.size(); }

◆ outputTensor()

const backend::IPortableTensor * onert::exec::MultiModelExecutors::outputTensor ( const ir::IOIndex & index) const
finalvirtual

Return NN package output tensor.

Parameters
[in] index — Output index
Returns
Tensor of output

Implements onert::exec::IExecutors.

Definition at line 95 of file MultiModelExecutors.cc.

96{
97 auto const [model_index, subg_index, io_index] = _model_edges->pkg_outputs[index.value()];
98 auto const executor = at(model_index, subg_index);
99 return executor->outputTensor(io_index.value());
100}

References at(), and onert::util::Index< T, DummyTag >::value().

Referenced by execute().


The documentation for this class was generated from the following files: