ONE - On-device Neural Engine
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
onert::exec::FunctionSequence Class Reference

#include <FunctionSequence.h>

Collaboration diagram for onert::exec::FunctionSequence:

Data Structures

struct  DynamicTensorCtx
 

Public Member Functions

template<typename... Args>
 FunctionSequence (Args &&...args)
 
virtual ~FunctionSequence ()=default
 
void run () override
 
void prepare () override
 
void append (std::unique_ptr< IFunction > &&function)
 Appends an IFunction object to the function sequence.
 
void iterate (const std::function< void(IFunction &)> &fn)
 
template<typename T , typename... Args>
void wrap (Args &&...args)
 
void dynamic_tensor_ctx (std::shared_ptr< DynamicTensorCtx > &dynamic_tensor_ctx)
 Prepare to run FunctionSequence which "might" handle dynamic tensor.
 
std::shared_ptr< DynamicTensorCtx > & dynamic_tensor_ctx ()
 
void enableDynamicShapeInferer (bool enable)
 Call this function by passing true if this FunctionSequence handles dynamic tensors and should run DynamicShapeInferer. This function can be called multiple times and if false is passed during multiple calls, DynamicShapeInferer will not be run.
 
void initRunning ()
 Call this function to initialize vars before running.
 
- Public Member Functions inherited from onert::exec::IFunction
virtual ~IFunction ()=default
 

Protected Attributes

std::vector< std::unique_ptr< IFunction > > _functions
 
bool _enable_dynamic_shape_inferer = false
 
std::shared_ptr< DynamicTensorCtx > _dynamic_tensor_ctx = nullptr
 

Detailed Description

Definition at line 33 of file FunctionSequence.h.

Constructor & Destructor Documentation

◆ FunctionSequence()

template<typename... Args>
onert::exec::FunctionSequence::FunctionSequence ( Args &&...  args)
inline

Definition at line 36 of file FunctionSequence.h.

37 {
38 (_functions.emplace_back(std::move(args)), ...);
39 }
std::vector< std::unique_ptr< IFunction > > _functions

References _functions.

◆ ~FunctionSequence()

virtual onert::exec::FunctionSequence::~FunctionSequence ( )
virtual default

Member Function Documentation

◆ append()

void onert::exec::FunctionSequence::append ( std::unique_ptr< IFunction > &&  function)

Appends an IFunction object to the function sequence.

Parameters
function — IFunction object to be appended

Definition at line 68 of file FunctionSequence.cc.

69{
70 _functions.push_back(std::move(function));
71}

References _functions.

◆ dynamic_tensor_ctx() [1/2]

std::shared_ptr< DynamicTensorCtx > & onert::exec::FunctionSequence::dynamic_tensor_ctx ( )
inline

Definition at line 81 of file FunctionSequence.h.

81{ return _dynamic_tensor_ctx; }
std::shared_ptr< DynamicTensorCtx > _dynamic_tensor_ctx

References _dynamic_tensor_ctx.

Referenced by dynamic_tensor_ctx(), and run().

◆ dynamic_tensor_ctx() [2/2]

void onert::exec::FunctionSequence::dynamic_tensor_ctx ( std::shared_ptr< DynamicTensorCtx > &  dynamic_tensor_ctx)
inline

Prepare to run FunctionSequence which "might" handle dynamic tensor.

Note
Calling this does not mean that run() will handle dynamic tensors. Calling enableDynamicShapeInferer(true) will make run() handle dynamic tensors.

Definition at line 76 of file FunctionSequence.h.

77 {
79 }
std::shared_ptr< DynamicTensorCtx > & dynamic_tensor_ctx()

References _dynamic_tensor_ctx, and dynamic_tensor_ctx().

◆ enableDynamicShapeInferer()

void onert::exec::FunctionSequence::enableDynamicShapeInferer ( bool  enable)
inline

Call this function by passing true if this FunctionSequence handles dynamic tensors and should run DynamicShapeInferer. This function can be called multiple times and if false is passed during multiple calls, DynamicShapeInferer will not be run.

Note
This must be called before run(). If not called, run() assumes that all tensors are dynamic and DynamicShapeInferer will be run.

Definition at line 90 of file FunctionSequence.h.

References _enable_dynamic_shape_inferer.

Referenced by run().

◆ initRunning()

void onert::exec::FunctionSequence::initRunning ( )
inline

Call this function to initialize vars before running.

Note
When we run a model with static tensor input and then run with dynamic tensor input, _enable_dynamic_shape_inferer is set to false at the first run. Once _enable_dynamic_shape_inferer is set to true, it cannot be changed back to false merely by calling enableDynamicShapeInferer(). So initializing it to false here is necessary.

Definition at line 104 of file FunctionSequence.h.

References _enable_dynamic_shape_inferer.

◆ iterate()

void onert::exec::FunctionSequence::iterate ( const std::function< void(IFunction &)> &  fn)

Definition at line 73 of file FunctionSequence.cc.

74{
75 for (const auto &func : _functions)
76 {
77 fn(*func);
78 }
79}

References _functions.

◆ prepare()

void onert::exec::FunctionSequence::prepare ( )
overridevirtual

Reimplemented from onert::exec::IFunction.

Definition at line 60 of file FunctionSequence.cc.

61{
62 for (const auto &function : _functions)
63 {
64 function->prepare();
65 }
66}

References _functions.

◆ run()

void onert::exec::FunctionSequence::run ( )
overridevirtual

Implements onert::exec::IFunction.

Definition at line 25 of file FunctionSequence.cc.

26{
28 {
29 // acl_cl and acl_neon backend don't support dynamic shape.
30 // _dynamic_tensor_ctx is always nullptr for acl_cl and acl_neon
31 // Thus, those two backends cannot reach here.
32
33 // Do dynamic shape inference
34 _dynamic_tensor_ctx->op->accept(*_dynamic_tensor_ctx->dynamic_shape_inferer);
35
36 for (const auto &function : _functions)
37 {
38 // NOTE the function could be also FunctionSequence so we do this
39 // TODO Remove this or do this recursively
40 auto *sub_func_seq = dynamic_cast<FunctionSequence *>(function.get());
41 if (sub_func_seq != nullptr)
42 {
43 sub_func_seq->enableDynamicShapeInferer(true);
44 sub_func_seq->dynamic_tensor_ctx(dynamic_tensor_ctx());
45 }
46
47 // run kernel
48 function->run();
49 }
50 }
51 else
52 {
53 for (const auto &function : _functions)
54 {
55 function->run();
56 }
57 }
58}

References _dynamic_tensor_ctx, _enable_dynamic_shape_inferer, _functions, dynamic_tensor_ctx(), and enableDynamicShapeInferer().

Referenced by onert::exec::Job::run().

◆ wrap()

template<typename T , typename... Args>
void onert::exec::FunctionSequence::wrap ( Args &&...  args)
inline

Definition at line 56 of file FunctionSequence.h.

57 {
58 for (auto &&function : _functions)
59 {
60 function = std::make_unique<T>(std::move(function), args...);
61 }
62 }
args
Definition infer.py:21

References _functions.

Field Documentation

◆ _dynamic_tensor_ctx

std::shared_ptr<DynamicTensorCtx> onert::exec::FunctionSequence::_dynamic_tensor_ctx = nullptr
protected

Definition at line 112 of file FunctionSequence.h.

Referenced by dynamic_tensor_ctx(), dynamic_tensor_ctx(), and run().

◆ _enable_dynamic_shape_inferer

bool onert::exec::FunctionSequence::_enable_dynamic_shape_inferer = false
protected

Definition at line 110 of file FunctionSequence.h.

Referenced by enableDynamicShapeInferer(), initRunning(), and run().

◆ _functions

std::vector<std::unique_ptr<IFunction> > onert::exec::FunctionSequence::_functions
protected

Definition at line 107 of file FunctionSequence.h.

Referenced by append(), FunctionSequence(), iterate(), prepare(), run(), and wrap().


The documentation for this class was generated from the following files: