ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::exec::train::TrainableExecutor Class Reference

#include <TrainableExecutor.h>

Collaboration diagram for onert::exec::train::TrainableExecutor:

Public Member Functions

 TrainableExecutor (std::unique_ptr< compiler::train::LoweredTrainableGraph > lowered_graph, backend::train::TrainableBackendContexts &&backend_contexts, const compiler::train::TensorRegistries &tensor_regs, compiler::train::TrainableCodeMap &&code_map, const std::vector< ir::OperationIndex > &forward_order, const std::vector< ir::OperationIndex > &backward_order, const util::TracingCtx *tracing_ctx, const ir::train::LossInfo &training_info)
 Construct a new TrainableExecutor object.
 
const ir::Graph & graph () const final
 Returns graph object.
 
void execute (const std::vector< backend::IPortableTensor * > &inputs, const std::vector< backend::IPortableTensor * > &outputs, const ExecutionOptions &options) override
 Execute with given input/output tensors.
 
uint32_t inputSize () const override
 Get input size.
 
uint32_t outputSize () const override
 Get output size.
 
const ir::OperandInfo & inputInfo (uint32_t index) const override
 Get input info at index.
 
const ir::OperandInfo & outputInfo (uint32_t index) const override
 Get output info at index.
 
ir::Layout inputLayout (uint32_t index) const override
 Get input layout at index.
 
ir::Layout outputLayout (uint32_t index) const override
 Get output layout at index.
 
void forward (const std::vector< backend::IPortableTensor * > &inputs, const std::vector< backend::IPortableTensor * > &outputs, const ExecutionOptions &options, bool training)
 
void backward (const ExecutionOptions &options, uint32_t training_step)
 
void setIndexedRanks (std::shared_ptr< ir::OperationIndexMap< int64_t > > ranks) final
 Set an ordering on operations.
 
void addObserver (std::unique_ptr< IExecutionObserver > ref)
 
float getLoss (const ir::IOIndex &pred_io_ind) const
 
void iterateTrainableTensors (const std::function< void(const ir::OperandIndex &, const backend::train::ITrainableTensor *)> &fn) const
 
backend::train::TrainableBackendContexts & getBackendContexts ()
 
const ExecutionOptions & currentOptions () const override
 Return current execution configuration.
 
- Public Member Functions inherited from onert::exec::IExecutor
 IExecutor ()=default
 Construct a new IExecutor object.
 
virtual ~IExecutor ()=default
 Destroy the IExecutor object.
 

Detailed Description

Definition at line 39 of file TrainableExecutor.h.

Constructor & Destructor Documentation

◆ TrainableExecutor()

onert::exec::train::TrainableExecutor::TrainableExecutor ( std::unique_ptr< compiler::train::LoweredTrainableGraph >  lowered_graph,
backend::train::TrainableBackendContexts &&  backend_contexts,
const compiler::train::TensorRegistries &  tensor_regs,
compiler::train::TrainableCodeMap &&  code_map,
const std::vector< ir::OperationIndex > &  forward_order,
const std::vector< ir::OperationIndex > &  backward_order,
const util::TracingCtx *  tracing_ctx,
const ir::train::LossInfo &  training_info 
)

Construct a new TrainableExecutor object.

Parameters
lowered_graph LoweredTrainableGraph object
tensor_builders Tensor builders that are currently used
code_map ir::Operation and its code map

Definition at line 31 of file TrainableExecutor.cc.

39 : _code_map{std::move(code_map)}, _forward_order{std::move(forward_order)},
40 _backward_order{std::move(backward_order)}, _lowered_graph{std::move(lowered_graph)},
41 _backend_contexts{std::move(backend_contexts)},
42 _trainable_graph{_lowered_graph->trainable_graph()}, _tensor_regs{std::move(tensor_regs)},
43 _mutex(), _tracing_ctx(tracing_ctx), _loss_info(loss_info)
44{
45 auto build_tensor_list = [&](const auto &ind_seq, auto &tensors) {
46 assert(tensors.empty());
47 for (auto &&ind : ind_seq)
48 {
49 backend::ITensor *tensor = _tensor_regs.getITensor(ind);
50 assert(tensor != nullptr);
51 auto io_tensor = nnfw::misc::polymorphic_downcast<backend::builtin::IOTensor *>(tensor);
52 tensors.push_back(io_tensor);
53 }
54 };
55 build_tensor_list(_trainable_graph.getInputs(), _input_tensors);
56 build_tensor_list(_trainable_graph.getOutputs(), _output_tensors);
57}
backend::ITensor * getITensor(ir::OperandIndex index) const
const OperandIndexSequence & getOutputs() const override
const OperandIndexSequence & getInputs() const override

References onert::ir::train::TrainableGraph::getInputs(), onert::compiler::train::TensorRegistries::getITensor(), and onert::ir::train::TrainableGraph::getOutputs().

Member Function Documentation

◆ addObserver()

void onert::exec::train::TrainableExecutor::addObserver ( std::unique_ptr< IExecutionObserver >  ref)
inline

Definition at line 98 of file TrainableExecutor.h.

98{ _observers.add(std::move(ref)); };
void add(std::unique_ptr< IExecutionObserver > &&observer)

References onert::exec::ExecObservers::add().

◆ backward()

void onert::exec::train::TrainableExecutor::backward ( const ExecutionOptions &  options,
uint32_t  training_step 
)

Definition at line 137 of file TrainableExecutor.cc.

138{
139 // For thread-safe, use mutex
140 // TODO: if all used backends on this executor are thread-safe,
141 // do not need to use mutex (otherwise, use mutex)
142 std::lock_guard<std::mutex> lock(_mutex);
143 _current_options = options;
144
145 // Create observee
146 ExecutionObservee subject(_observers, options);
147
148 backwardImpl(subject, training_step);
149}

Referenced by onert::exec::train::TrainableExecutors::train().

◆ currentOptions()

const ExecutionOptions & onert::exec::train::TrainableExecutor::currentOptions ( ) const
inlineoverridevirtual

Return current execution configuration.

Returns
Current execution configuration

Implements onert::exec::IExecutor.

Definition at line 108 of file TrainableExecutor.h.

108{ return _current_options; }

◆ execute()

void onert::exec::train::TrainableExecutor::execute ( const std::vector< backend::IPortableTensor * > &  inputs,
const std::vector< backend::IPortableTensor * > &  outputs,
const ExecutionOptions &  options 
)
inlineoverridevirtual

Execute with given input/output tensors.

Input and output tensors must be given.

Parameters
[in] inputs Tensors that are passed as inputs
[in] outputs Tensors that are passed as outputs
[in] options Execution options

Implements onert::exec::IExecutor.

Definition at line 59 of file TrainableExecutor.h.

62 {
63 forward(inputs, outputs, options, false);
64 }
void forward(const std::vector< backend::IPortableTensor * > &inputs, const std::vector< backend::IPortableTensor * > &outputs, const ExecutionOptions &options, bool training)

References forward().

◆ forward()

void onert::exec::train::TrainableExecutor::forward ( const std::vector< backend::IPortableTensor * > &  inputs,
const std::vector< backend::IPortableTensor * > &  outputs,
const ExecutionOptions &  options,
bool  training 
)

Definition at line 59 of file TrainableExecutor.cc.

62{
63 // For thread-safe, use mutex
64 // TODO: if all used backends on this executor are thread-safe,
65 // do not need to use mutex (otherwise, use mutex)
66 std::lock_guard<std::mutex> lock(_mutex);
67 _current_options = options;
68
69 assert(_input_tensors.size() == inputs.size());
70 for (uint32_t i = 0; i < _input_tensors.size(); ++i)
71 {
72 auto tensor = _input_tensors[i];
73 const auto input = inputs[i];
74 assert(input->buffer() != nullptr || input->get_info().total_size() == 0);
75 assert(tensor != nullptr);
76 tensor->setTensor(input);
77 }
78
79 // Set output(s)
80 assert(_output_tensors.size() == outputs.size());
81 for (uint32_t i = 0; i < _output_tensors.size(); ++i)
82 {
83 auto tensor = _output_tensors[i];
84 const auto output = outputs[i];
85 // Output may not be used on training, so don't check optional
86 assert(tensor != nullptr);
87 tensor->setTensor(output);
88 }
89
90 // Create observee
91 ExecutionObservee subject(_observers, options);
92
93 forwardImpl(subject, training);
94
95 // TODO Update output(s) desc if desc has dynamic input
96}

Referenced by execute().

◆ getBackendContexts()

backend::train::TrainableBackendContexts & onert::exec::train::TrainableExecutor::getBackendContexts ( )
inline

Definition at line 106 of file TrainableExecutor.h.

106{ return _backend_contexts; }

◆ getLoss()

float onert::exec::train::TrainableExecutor::getLoss ( const ir::IOIndex &  pred_io_ind) const

Definition at line 198 of file TrainableExecutor.cc.

199{
200 const auto &loss_ind = _trainable_graph.getLossIndex(pred_io_ind);
201 if (loss_ind.undefined())
202 throw std::runtime_error{"Loss " + std::to_string(loss_ind.value()) + " is not defined."};
203 backend::ITensor *tensor = _tensor_regs.getITensor(loss_ind);
204 long double sum = 0;
205 for (uint64_t i = 0; i < tensor->getShape().num_elements(); ++i)
206 {
207 sum += reinterpret_cast<float *>(tensor->buffer())[i];
208 }
209 if (_loss_info.reduction_type == ir::train::LossReductionType::SumOverBatchSize)
210 {
211 sum /= tensor->getShape().num_elements();
212 }
213 return static_cast<float>(sum);
214}
OperandIndex getLossIndex(const IOIndex &pred_io_ind) const
uint32_t num_elements(const Shape &shape)
The number of elements of a feature map of a given shape.
Definition Shape.h:59
LossReductionType reduction_type
Definition LossInfo.h:45

References onert::compiler::train::TensorRegistries::getITensor(), onert::ir::train::TrainableGraph::getLossIndex(), onert::ir::train::LossInfo::reduction_type, and onert::ir::train::SumOverBatchSize.

Referenced by onert::exec::train::TrainableExecutors::getLoss().

◆ graph()

const ir::Graph & onert::exec::train::TrainableExecutor::graph ( ) const
inlinefinalvirtual

Returns graph object.

Returns
Graph object

Implements onert::exec::IExecutor.

Definition at line 57 of file TrainableExecutor.h.

57{ return _trainable_graph.graph(); }

References onert::ir::train::TrainableGraph::graph().

Referenced by TopologicalSortHelper.TopologicalSortHelper::add_edge(), and TopologicalSortHelper.TopologicalSortHelper::sort_util().

◆ inputInfo()

const ir::OperandInfo & onert::exec::train::TrainableExecutor::inputInfo ( uint32_t  index) const
inlineoverridevirtual

Get input info at index.

Parameters
[in] index Index of input
Returns
Input operand info

Implements onert::exec::IExecutor.

Definition at line 70 of file TrainableExecutor.h.

71 {
72 return _input_tensors[index]->get_info();
73 }
loco::GraphInputIndex index(const TFPlaceholder *node)
Definition TFNode.cpp:54

Referenced by onert::exec::train::TrainableExecutors::inputInfo().

◆ inputLayout()

ir::Layout onert::exec::train::TrainableExecutor::inputLayout ( uint32_t  index) const
inlineoverridevirtual

Get input layout at index.

Parameters
[in] index Index of input
Returns
Input operand layout

Implements onert::exec::IExecutor.

Definition at line 80 of file TrainableExecutor.h.

80{ return _input_tensors[index]->layout(); }

◆ inputSize()

uint32_t onert::exec::train::TrainableExecutor::inputSize ( ) const
inlineoverridevirtual

Get input size.

Returns
Input size

Implements onert::exec::IExecutor.

Definition at line 66 of file TrainableExecutor.h.

66{ return _input_tensors.size(); }

Referenced by onert::exec::train::TrainableExecutors::inputSize().

◆ iterateTrainableTensors()

void onert::exec::train::TrainableExecutor::iterateTrainableTensors ( const std::function< void(const ir::OperandIndex &, const backend::train::ITrainableTensor *)> &  fn) const

Definition at line 216 of file TrainableExecutor.cc.

219{
220 _tensor_regs.iterateTrainableTensors(fn);
221}
void iterateTrainableTensors(const std::function< void(const ir::OperandIndex &, const backend::train::ITrainableTensor *)> &fn) const

References onert::compiler::train::TensorRegistries::iterateTrainableTensors().

Referenced by onert::exec::train::TrainableExecutors::iterateTrainableTensors().

◆ outputInfo()

const ir::OperandInfo & onert::exec::train::TrainableExecutor::outputInfo ( uint32_t  index) const
inlineoverridevirtual

Get output info at index.

Parameters
[in] index Index of output
Returns
Output operand info

Implements onert::exec::IExecutor.

Definition at line 75 of file TrainableExecutor.h.

76 {
77 return _output_tensors[index]->get_info();
78 }

Referenced by onert::exec::train::TrainableExecutors::outputInfo().

◆ outputLayout()

ir::Layout onert::exec::train::TrainableExecutor::outputLayout ( uint32_t  index) const
inlineoverridevirtual

Get output layout at index.

Parameters
[in] index Index of output
Returns
Output operand layout

Implements onert::exec::IExecutor.

Definition at line 82 of file TrainableExecutor.h.

83 {
84 return _output_tensors[index]->layout();
85 }

◆ outputSize()

uint32_t onert::exec::train::TrainableExecutor::outputSize ( ) const
inlineoverridevirtual

Get output size.

Returns
Output size

Implements onert::exec::IExecutor.

Definition at line 68 of file TrainableExecutor.h.

68{ return _output_tensors.size(); }

Referenced by onert::exec::train::TrainableExecutors::outputSize().

◆ setIndexedRanks()

void onert::exec::train::TrainableExecutor::setIndexedRanks ( std::shared_ptr< ir::OperationIndexMap< int64_t > >  ranks)
inlinefinalvirtual

Set an ordering on operations.

Parameters
[in] ranks The table encoding the ordering

Implements onert::exec::IExecutor.

Definition at line 93 of file TrainableExecutor.h.

94 {
95 _indexed_ranks = std::move(ranks);
96 };

The documentation for this class was generated from the following files: