ONE - On-device Neural Engine
onert::exec::train::TrainableExecutor Class Reference

#include <TrainableExecutor.h>

Collaboration diagram for onert::exec::train::TrainableExecutor:

Public Member Functions

 TrainableExecutor (std::unique_ptr< compiler::train::LoweredTrainableGraph > lowered_graph, backend::train::TrainableBackendContexts &&backend_contexts, const compiler::train::TensorRegistries &tensor_regs, compiler::train::TrainableCodeMap &&code_map, const std::vector< ir::OperationIndex > &forward_order, const std::vector< ir::OperationIndex > &backward_order, const util::TracingCtx *tracing_ctx, const ir::train::LossInfo &training_info)
 Construct a new TrainableExecutor object.
 
const ir::Graph & graph () const final
 Returns graph object.
 
void execute (const std::vector< backend::IPortableTensor * > &inputs, const std::vector< backend::IPortableTensor * > &outputs, const ExecutionOptions &options) override
 Execute with given input/output tensors.
 
uint32_t inputSize () const override
 Get input size.
 
uint32_t outputSize () const override
 Get output size.
 
const ir::OperandInfo & inputInfo (uint32_t index) const override
 Get input info at index.
 
const ir::OperandInfo & outputInfo (uint32_t index) const override
 Get output info at index.
 
ir::Layout inputLayout (uint32_t index) const override
 Get input layout at index.
 
ir::Layout outputLayout (uint32_t index) const override
 Get output layout at index.
 
void forward (const std::vector< backend::IPortableTensor * > &inputs, const std::vector< backend::IPortableTensor * > &outputs, const ExecutionOptions &options, bool training)
 
void backward (const ExecutionOptions &options, uint32_t training_step)
 
void setIndexedRanks (std::shared_ptr< ir::OperationIndexMap< int64_t > > ranks) final
 Set an ordering on operations.
 
void addObserver (std::unique_ptr< IExecutionObserver > ref)
 
float getLoss (const ir::IOIndex &pred_io_ind) const
 
void iterateTrainableTensors (const std::function< void(const ir::OperandIndex &, const backend::train::ITrainableTensor *)> &fn) const
 
backend::train::TrainableBackendContexts & getBackendContexts ()
 
const ExecutionOptions & currentOptions () const override
 Return current execution configuration.
 
- Public Member Functions inherited from onert::exec::IExecutor
 IExecutor ()=default
 Construct a new IExecutor object.
 
virtual ~IExecutor ()=default
 Destroy the IExecutor object.
 

Detailed Description

Definition at line 35 of file TrainableExecutor.h.

Constructor & Destructor Documentation

◆ TrainableExecutor()

onert::exec::train::TrainableExecutor::TrainableExecutor ( std::unique_ptr< compiler::train::LoweredTrainableGraph > lowered_graph,
backend::train::TrainableBackendContexts && backend_contexts,
const compiler::train::TensorRegistries & tensor_regs,
compiler::train::TrainableCodeMap && code_map,
const std::vector< ir::OperationIndex > & forward_order,
const std::vector< ir::OperationIndex > & backward_order,
const util::TracingCtx * tracing_ctx,
const ir::train::LossInfo & training_info
)

Construct a new TrainableExecutor object.

Parameters
lowered_graph - LoweredTrainableGraph object
backend_contexts - Trainable backend contexts that are currently used
tensor_regs - Tensor registries that are currently used
code_map - ir::Operation and its code map
forward_order - Operation execution order for the forward pass
backward_order - Operation execution order for the backward pass
tracing_ctx - Tracing context
training_info - Loss information used for training

Definition at line 27 of file TrainableExecutor.cc.

35 : _code_map{std::move(code_map)}, _forward_order{std::move(forward_order)},
36 _backward_order{std::move(backward_order)}, _lowered_graph{std::move(lowered_graph)},
37 _backend_contexts{std::move(backend_contexts)},
38 _trainable_graph{_lowered_graph->trainable_graph()}, _tensor_regs{std::move(tensor_regs)},
39 _mutex(), _tracing_ctx(tracing_ctx), _loss_info(loss_info)
40{
41 auto build_tensor_list = [&](const auto &ind_seq, auto &tensors) {
42 assert(tensors.empty());
43 for (auto &&ind : ind_seq)
44 {
45 backend::ITensor *tensor = _tensor_regs.getITensor(ind);
46 assert(tensor != nullptr);
47 auto io_tensor = nnfw::misc::polymorphic_downcast<backend::builtin::IOTensor *>(tensor);
48 tensors.push_back(io_tensor);
49 }
50 };
51 build_tensor_list(_trainable_graph.getInputs(), _input_tensors);
52 build_tensor_list(_trainable_graph.getOutputs(), _output_tensors);
53}

References onert::ir::train::TrainableGraph::getInputs(), onert::compiler::train::TensorRegistries::getITensor(), and onert::ir::train::TrainableGraph::getOutputs().
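
The constructor's build_tensor_list lambda walks the graph's input and output operand indices, looks each one up in the tensor registries, and downcasts the result to the user-facing IOTensor type before caching it. Below is a minimal self-contained sketch of that collect-and-downcast pattern, with invented stand-in types and a plain static_cast in place of nnfw::misc::polymorphic_downcast:

#include <cassert>
#include <vector>

// Stand-ins for backend::ITensor and backend::builtin::IOTensor, for illustration only.
struct ITensor { virtual ~ITensor() = default; };
struct IOTensor : ITensor {};

// Collect the IOTensor behind each operand index, as build_tensor_list does
// for _input_tensors and _output_tensors in the constructor above.
template <typename IndexSeq, typename Registry>
std::vector<IOTensor *> buildTensorList(const IndexSeq &indices, Registry &registry)
{
  std::vector<IOTensor *> tensors;
  for (auto &&ind : indices)
  {
    ITensor *tensor = registry.getITensor(ind);
    assert(tensor != nullptr);
    // Production code uses nnfw::misc::polymorphic_downcast, which checks the
    // dynamic type in debug builds; static_cast is the plain-C++ analogue here.
    tensors.push_back(static_cast<IOTensor *>(tensor));
  }
  return tensors;
}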

Member Function Documentation

◆ addObserver()

void onert::exec::train::TrainableExecutor::addObserver ( std::unique_ptr< IExecutionObserver > ref)
inline

Definition at line 94 of file TrainableExecutor.h.

94{ _observers.add(std::move(ref)); };

References onert::exec::ExecObservers::add().
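
addObserver() hands ownership of the observer to the executor's ExecObservers collection, which is consulted on every forward/backward run. A hedged usage fragment, assuming a concrete observer class derived from IExecutionObserver (the TracingObserver and registerTracing names are hypothetical):

#include <memory>
#include <TrainableExecutor.h>

// Register a hypothetical observer implementation; the executor takes ownership.
void registerTracing(onert::exec::train::TrainableExecutor &executor)
{
  executor.addObserver(std::make_unique<TracingObserver>());
}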

◆ backward()

void onert::exec::train::TrainableExecutor::backward ( const ExecutionOptions & options,
uint32_t  training_step 
)

Definition at line 133 of file TrainableExecutor.cc.

134{
135 // For thread safety, use a mutex.
136 // TODO: If all backends used by this executor are thread-safe,
137 // the mutex is not needed (otherwise it is).
138 std::lock_guard<std::mutex> lock(_mutex);
139 _current_options = options;
140
141 // Create observee
142 ExecutionObservee subject(_observers, options);
143
144 backwardImpl(subject, training_step);
145}

Referenced by onert::exec::train::TrainableExecutors::train().
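
backward() runs the backward (gradient) pass for one training step under the same mutex as forward(); TrainableExecutors::train() drives it after a forward pass in training mode. A hedged sketch of that per-step sequence (the trainSteps name is illustrative, and all arguments are assumed to be prepared by the caller):

#include <cstdint>
#include <vector>
#include <TrainableExecutor.h>

// One training iteration per step: a forward pass with training enabled,
// followed by the backward pass for that step.
void trainSteps(onert::exec::train::TrainableExecutor &executor,
                const std::vector<onert::backend::IPortableTensor *> &inputs,
                const std::vector<onert::backend::IPortableTensor *> &outputs,
                const onert::exec::ExecutionOptions &options, uint32_t num_steps)
{
  for (uint32_t step = 0; step < num_steps; ++step)
  {
    executor.forward(inputs, outputs, options, /*training=*/true);
    executor.backward(options, /*training_step=*/step);
  }
}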

◆ currentOptions()

const ExecutionOptions & onert::exec::train::TrainableExecutor::currentOptions ( ) const
inline override virtual

Return current execution configuration.

Returns
Current execution configuration

Implements onert::exec::IExecutor.

Definition at line 104 of file TrainableExecutor.h.

104{ return _current_options; }

◆ execute()

void onert::exec::train::TrainableExecutor::execute ( const std::vector< backend::IPortableTensor * > &  inputs,
const std::vector< backend::IPortableTensor * > &  outputs,
const ExecutionOptions & options
)
inline override virtual

Execute with given input/output tensors.

Input and output tensors must be given.

Parameters
[in] inputs - Tensors that are passed as inputs
[in] outputs - Tensors that are passed as outputs
[in] options - Execution options

Implements onert::exec::IExecutor.

Definition at line 55 of file TrainableExecutor.h.

58 {
59 forward(inputs, outputs, options, false);
60 }

References forward().

◆ forward()

void onert::exec::train::TrainableExecutor::forward ( const std::vector< backend::IPortableTensor * > &  inputs,
const std::vector< backend::IPortableTensor * > &  outputs,
const ExecutionOptions & options,
bool  training 
)

Definition at line 55 of file TrainableExecutor.cc.

58{
59 // For thread safety, use a mutex.
60 // TODO: If all backends used by this executor are thread-safe,
61 // the mutex is not needed (otherwise it is).
62 std::lock_guard<std::mutex> lock(_mutex);
63 _current_options = options;
64
65 assert(_input_tensors.size() == inputs.size());
66 for (uint32_t i = 0; i < _input_tensors.size(); ++i)
67 {
68 auto tensor = _input_tensors[i];
69 const auto input = inputs[i];
70 assert(input->buffer() != nullptr || input->get_info().total_size() == 0);
71 assert(tensor != nullptr);
72 tensor->setTensor(input);
73 }
74
75 // Set output(s)
76 assert(_output_tensors.size() == outputs.size());
77 for (uint32_t i = 0; i < _output_tensors.size(); ++i)
78 {
79 auto tensor = _output_tensors[i];
80 const auto output = outputs[i];
81 // Output may not be used on training, so don't check optional
82 assert(tensor != nullptr);
83 tensor->setTensor(output);
84 }
85
86 // Create observee
87 ExecutionObservee subject(_observers, options);
88
89 forwardImpl(subject, training);
90
91 // TODO Update output(s) desc if desc has dynamic input
92}

Referenced by execute().
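
As the snippet shows, forward() (and likewise backward()) holds a std::mutex for the whole call, binds the caller's tensors onto the executor's IOTensor slots via setTensor(), and only then runs the graph while notifying observers. A minimal self-contained sketch of that lock-bind-run structure, with invented names:

#include <cassert>
#include <cstddef>
#include <mutex>
#include <vector>

struct UserTensor {};  // stand-in for backend::IPortableTensor
struct Slot            // stand-in for backend::builtin::IOTensor
{
  void set(UserTensor *t) { bound = t; }
  UserTensor *bound = nullptr;
};

class LockBindRun
{
public:
  void run(const std::vector<UserTensor *> &inputs)
  {
    // Serialize concurrent callers for the whole execution, as forward() does.
    std::lock_guard<std::mutex> lock(_mutex);

    // Bind caller-owned tensors onto the executor's I/O slots (setTensor()).
    assert(_input_slots.size() == inputs.size());
    for (std::size_t i = 0; i < inputs.size(); ++i)
      _input_slots[i].set(inputs[i]);

    // ... notify observers and execute the graph here ...
  }

private:
  std::mutex _mutex;
  std::vector<Slot> _input_slots;
};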

◆ getBackendContexts()

backend::train::TrainableBackendContexts & onert::exec::train::TrainableExecutor::getBackendContexts ( )
inline

Definition at line 102 of file TrainableExecutor.h.

102{ return _backend_contexts; }

◆ getLoss()

float onert::exec::train::TrainableExecutor::getLoss ( const ir::IOIndex & pred_io_ind) const

Definition at line 194 of file TrainableExecutor.cc.

195{
196 const auto &loss_ind = _trainable_graph.getLossIndex(pred_io_ind);
197 if (loss_ind.undefined())
198 throw std::runtime_error{"Loss " + std::to_string(loss_ind.value()) + " is not defined."};
199 backend::ITensor *tensor = _tensor_regs.getITensor(loss_ind);
200 long double sum = 0;
201 for (uint64_t i = 0; i < tensor->getShape().num_elements(); ++i)
202 {
203 sum += reinterpret_cast<float *>(tensor->buffer())[i];
204 }
205 if (_loss_info.reduction_type == ir::train::LossReductionType::SumOverBatchSize)
206 {
207 sum /= tensor->getShape().num_elements();
208 }
209 return static_cast<float>(sum);
210}

References onert::compiler::train::TensorRegistries::getITensor(), onert::ir::train::TrainableGraph::getLossIndex(), onert::ir::train::LossInfo::reduction_type, and onert::ir::train::SumOverBatchSize.

Referenced by onert::exec::train::TrainableExecutors::getLoss().
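
getLoss() sums the elements of the loss tensor bound to the given output index and, when the loss reduction type is SumOverBatchSize, divides by the element count to obtain a mean. A self-contained sketch of just that reduction, using a plain std::vector in place of backend::ITensor and a hypothetical enum mirroring ir::train::LossReductionType:

#include <vector>

// Illustrative mirror of the reduction modes referenced above.
enum class LossReductionType { Sum, SumOverBatchSize };

// Accumulate in long double as getLoss() does, then average if requested.
float reduceLoss(const std::vector<float> &loss, LossReductionType reduction)
{
  long double sum = 0;
  for (float v : loss)
    sum += v;
  if (reduction == LossReductionType::SumOverBatchSize && !loss.empty())
    sum /= loss.size();
  return static_cast<float>(sum);
}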

◆ graph()

const ir::Graph & onert::exec::train::TrainableExecutor::graph ( ) const
inline final virtual

Returns graph object.

Returns
Graph object

Implements onert::exec::IExecutor.

Definition at line 53 of file TrainableExecutor.h.

53{ return _trainable_graph.graph(); }

References onert::ir::train::TrainableGraph::graph().

Referenced by TopologicalSortHelper.TopologicalSortHelper::add_edge(), and TopologicalSortHelper.TopologicalSortHelper::sort_util().

◆ inputInfo()

const ir::OperandInfo & onert::exec::train::TrainableExecutor::inputInfo ( uint32_t  index) const
inline override virtual

Get input info at index.

Parameters
[in] index - Index of input
Returns
Input operand info

Implements onert::exec::IExecutor.

Definition at line 66 of file TrainableExecutor.h.

67 {
68 return _input_tensors[index]->get_info();
69 }

Referenced by onert::exec::train::TrainableExecutors::inputInfo().

◆ inputLayout()

ir::Layout onert::exec::train::TrainableExecutor::inputLayout ( uint32_t  index) const
inline override virtual

Get input layout at index.

Parameters
[in] index - Index of input
Returns
Input operand layout

Implements onert::exec::IExecutor.

Definition at line 76 of file TrainableExecutor.h.

76{ return _input_tensors[index]->layout(); }

◆ inputSize()

uint32_t onert::exec::train::TrainableExecutor::inputSize ( ) const
inline override virtual

Get input size.

Returns
Input size

Implements onert::exec::IExecutor.

Definition at line 62 of file TrainableExecutor.h.

62{ return _input_tensors.size(); }

Referenced by onert::exec::train::TrainableExecutors::inputSize().
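
Together, inputSize(), inputInfo() and inputLayout() (and their output counterparts below) let a caller enumerate the executor's I/O metadata. A hedged usage fragment (the dumpInputs name is illustrative):

#include <cstdint>
#include <TrainableExecutor.h>

// Inspect every executor input's static metadata.
void dumpInputs(const onert::exec::train::TrainableExecutor &executor)
{
  for (uint32_t i = 0; i < executor.inputSize(); ++i)
  {
    const onert::ir::OperandInfo &info = executor.inputInfo(i); // shape and type metadata
    onert::ir::Layout layout = executor.inputLayout(i);         // e.g. NHWC
    (void)info;
    (void)layout;
  }
}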

◆ iterateTrainableTensors()

void onert::exec::train::TrainableExecutor::iterateTrainableTensors ( const std::function< void(const ir::OperandIndex &, const backend::train::ITrainableTensor *)> &  fn) const

Definition at line 212 of file TrainableExecutor.cc.

215{
216 _tensor_regs.iterateTrainableTensors(fn);
217}

References onert::compiler::train::TensorRegistries::iterateTrainableTensors().

Referenced by onert::exec::train::TrainableExecutors::iterateTrainableTensors().
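
Because the visitor receives each trainable tensor together with its operand index, a caller can, for instance, total the number of trainable parameters. A hedged fragment (the countTrainableParams name is illustrative, and ITrainableTensor is assumed to expose ITensor's getShape(), as used in getLoss() above):

#include <cstdint>
#include <TrainableExecutor.h>

// Sum the element counts of all trainable tensors registered with this executor.
uint64_t countTrainableParams(const onert::exec::train::TrainableExecutor &executor)
{
  uint64_t total = 0;
  executor.iterateTrainableTensors(
    [&](const onert::ir::OperandIndex &, const onert::backend::train::ITrainableTensor *tensor) {
      total += tensor->getShape().num_elements();
    });
  return total;
}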

◆ outputInfo()

const ir::OperandInfo & onert::exec::train::TrainableExecutor::outputInfo ( uint32_t  index) const
inline override virtual

Get output info at index.

Parameters
[in] index - Index of output
Returns
Output operand info

Implements onert::exec::IExecutor.

Definition at line 71 of file TrainableExecutor.h.

72 {
73 return _output_tensors[index]->get_info();
74 }

Referenced by onert::exec::train::TrainableExecutors::outputInfo().

◆ outputLayout()

ir::Layout onert::exec::train::TrainableExecutor::outputLayout ( uint32_t  index) const
inline override virtual

Get output layout at index.

Parameters
[in] index - Index of output
Returns
Output operand layout

Implements onert::exec::IExecutor.

Definition at line 78 of file TrainableExecutor.h.

79 {
80 return _output_tensors[index]->layout();
81 }

◆ outputSize()

uint32_t onert::exec::train::TrainableExecutor::outputSize ( ) const
inline override virtual

Get output size.

Returns
Output size

Implements onert::exec::IExecutor.

Definition at line 64 of file TrainableExecutor.h.

64{ return _output_tensors.size(); }

Referenced by onert::exec::train::TrainableExecutors::outputSize().

◆ setIndexedRanks()

void onert::exec::train::TrainableExecutor::setIndexedRanks ( std::shared_ptr< ir::OperationIndexMap< int64_t > > ranks )
inline final virtual

Set an ordering on operations.

Parameters
[in] ranks - The table encoding the ordering

Implements onert::exec::IExecutor.

Definition at line 89 of file TrainableExecutor.h.

90 {
91 _indexed_ranks = std::move(ranks);
92 };
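
The ordering table is passed as a shared_ptr and moved into _indexed_ranks. A hedged fragment showing a caller attaching such a table (the rank values are assumed to be computed elsewhere, e.g. by a scheduler):

#include <memory>
#include <TrainableExecutor.h>

// Build an ordering table keyed by operation index and hand it to the executor.
void attachRanks(onert::exec::train::TrainableExecutor &executor)
{
  auto ranks = std::make_shared<onert::ir::OperationIndexMap<int64_t>>();
  // ... fill in one rank per ir::OperationIndex ...
  executor.setIndexedRanks(ranks);
}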

The documentation for this class was generated from the following files:
TrainableExecutor.h
TrainableExecutor.cc