ONE - On-device Neural Engine
onert::backend::train::TensorPlanner Class Reference

#include <TensorPlanner.h>

Public Member Functions

 TensorPlanner (const ir::train::TrainableGraph &tgraph, const util::Set< ir::OperandIndex > &external_operands)
 
 TensorPlanner (const TensorPlanner &)=delete
 
 TensorPlanner (TensorPlanner &&)=delete
 
TensorPlanner & operator= (const TensorPlanner &)=delete
 
TensorPlanner & operator= (TensorPlanner &&)=delete
 
 ~TensorPlanner ()=default
 
void planNonConstTensors (TensorBuilder *tensor_builder)
 
void planTrainableTensors (TensorBuilder *tensor_builder)
 
void planBackPropTensors (TensorBuilder *tensor_builder)
 
void planGradientTensors (TensorBuilder *tensor_builder)
 
void planDisposableBackPropTensors (TensorBuilder *tensor_builder)
 
void planLayerScopeTensors (TensorBuilder *tensor_builder)
 

Detailed Description

Definition at line 28 of file TensorPlanner.h.
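
TensorPlanner walks a TrainableGraph and notifies a TensorBuilder of each tensor's first definition and last use, one category at a time (non-constant, trainable, back-propagated, gradient, disposable back-prop, and layer-scope tensors), so the backend's memory planner can assign memory before execution. The following sketch shows how the passes might be driven; the wiring (the helper function and header for TensorBuilder) is hypothetical, and only the TensorPlanner calls are taken from this page.

#include "TensorPlanner.h"

using namespace onert;
using namespace onert::backend::train;

// Hypothetical driver; in the real backend this wiring lives in the
// backend's tensor-registration phase, not in a helper like this.
void planAllTensors(const ir::train::TrainableGraph &tgraph,
                    const util::Set<ir::OperandIndex> &external_operands,
                    TensorBuilder *tensor_builder)
{
  TensorPlanner planner{tgraph, external_operands};

  // Each pass only records first/last-use events on the builder;
  // no memory is allocated here.
  planner.planNonConstTensors(tensor_builder);
  planner.planTrainableTensors(tensor_builder);
  planner.planBackPropTensors(tensor_builder);
  planner.planGradientTensors(tensor_builder);
  planner.planDisposableBackPropTensors(tensor_builder);
  planner.planLayerScopeTensors(tensor_builder);
}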

Constructor & Destructor Documentation

◆ TensorPlanner() [1/3]

onert::backend::train::TensorPlanner::TensorPlanner ( const ir::train::TrainableGraph &  tgraph,
const util::Set< ir::OperandIndex > &  external_operands 
)

Definition at line 24 of file TensorPlanner.cc.

26 : _tgraph{tgraph}, _external_operands{external_operands}
27{
28 // DO NOTHING
29}

◆ TensorPlanner() [2/3]

onert::backend::train::TensorPlanner::TensorPlanner ( const TensorPlanner &  )
delete

◆ TensorPlanner() [3/3]

onert::backend::train::TensorPlanner::TensorPlanner ( TensorPlanner &&  )
delete

◆ ~TensorPlanner()

onert::backend::train::TensorPlanner::~TensorPlanner ( )
default

Member Function Documentation

◆ operator=() [1/2]

TensorPlanner & onert::backend::train::TensorPlanner::operator= ( const TensorPlanner &  )
delete

◆ operator=() [2/2]

TensorPlanner & onert::backend::train::TensorPlanner::operator= ( TensorPlanner &&  )
delete

◆ planBackPropTensors()

void onert::backend::train::TensorPlanner::planBackPropTensors ( TensorBuilder *  tensor_builder)

Definition at line 269 of file TensorPlanner.cc.

270{
271 VERBOSE(TensorPlanner) << "Start planning back-propagated tensors" << std::endl;
272
273 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> uses_map;
274 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> defs_map;
275
276 // Prepare scanning
277 const auto &training_usedefs = _tgraph.trainingUseDefs();
278 for (const auto &[operand_index, operand_usedefs] : training_usedefs)
279 {
280 const auto &operand = operand_usedefs.operand();
281
282 if (_external_operands.contains(operand_index.index()))
283 continue;
284
285 if (!tensor_builder->isRegisteredBackward(operand_index.index()))
286 continue;
287
288 if (operand_index.is_forward() || operand.isConstant())
289 continue;
290
291 uses_map[operand_index] = operand_usedefs.getTrainingUses().size();
292 defs_map[operand_index] = operand_usedefs.getTrainingDefs().size();
293 }
294
295 // Start scanning to do notify{First|Last}Use for each tensor
296
297 // This is a workaround to keep the operands alive throughout the execution
298 // (the operands look like they are unused)
299 std::vector<ir::train::TrainingOperandIndex> operands_last_until_end;
300 for (const auto &[ind, use_count] : uses_map)
301 {
302 if (use_count == 0)
303 operands_last_until_end.push_back(ind);
304 }
305
306 // At each operation,
307 // 1. Scan DEF of outgoing tensors. If it is the first DEF, allocate it
308 // 2. Scan DEF of inputs. If variable tensor, throw an exception (not supported yet)
309 // 3. Scan USE of incoming tensors. Decrease the USE and deallocate if the USE is 0
310 std::set<ir::OperandIndex> unallocated;
311 _tgraph.operands().iterate(
312 [&](const ir::OperandIndex &index, const ir::Operand &) { unallocated.insert(index); });
313
314 const auto border = _tgraph.essentialBackwardOrder();
315 for (const auto &op_ind : border)
316 {
317 const auto &op = _tgraph.operations().at(op_ind);
318 auto op_inputs = op.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
319 auto op_outputs = op.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
320
321 // Allocate back-propagated tensors at their first def
322 for (const auto &outgoing : op_inputs)
323 {
324 const auto operand_index = ir::train::TrainingOperandIndex{outgoing, false};
325 const auto &operand = _tgraph.operands().at(outgoing);
326 if (_external_operands.contains(outgoing))
327 continue;
328 if (!tensor_builder->isRegisteredBackward(outgoing))
329 continue;
330 if (operand.isConstant())
331 continue;
332
333 if (defs_map.find(operand_index) != defs_map.end())
334 {
335 if (unallocated.find(outgoing) != unallocated.end())
336 {
337 // First Def
338 unallocated.erase(outgoing);
339 defs_map[operand_index]--;
340 tensor_builder->notifyBackwardFirstUse(outgoing);
341 }
342 else
343 {
344 assert(defs_map[operand_index] > 0);
345 defs_map[operand_index]--;
346 }
347 }
348 }
349
350 // Scan variable tensors
351 // These tensors behave like constants, but OperandInfo and LowerInfo treat them as
352 // non-constant so that memory planning here can reduce memory usage
353 // However, the train backend does not support variable tensors yet
354 for (const auto &outgoing : op_inputs)
355 {
356 if (_external_operands.contains(outgoing))
357 continue;
358 if (!tensor_builder->isRegisteredBackward(outgoing))
359 continue;
360 const auto &operand = _tgraph.operands().at(outgoing);
361 if (operand.info().isVariable())
362 throw std::runtime_error("The train backend does not support variable tensors");
363 }
364
365 for (const auto &incoming : op_outputs)
366 {
367 const auto incoming_index = ir::train::TrainingOperandIndex{incoming, false};
368
369 if (_external_operands.contains(incoming))
370 continue;
371 if (!tensor_builder->isRegisteredBackward(incoming))
372 continue;
373
374 // NOTE An op's incoming tensors always have a corresponding op def
375 assert(defs_map.find(incoming_index) != defs_map.end());
376
377 if (uses_map.find(incoming_index) != uses_map.end())
378 {
379 assert(uses_map[incoming_index] > 0);
380 uses_map[incoming_index]--;
381 if (uses_map[incoming_index] == 0)
382 {
383 // plan for deallocation of static tensor node
384 tensor_builder->notifyBackwardLastUse(incoming);
385 }
386 }
387 }
388 }
389
390 for (const auto &index : operands_last_until_end)
391 {
392 tensor_builder->notifyBackwardLastUse(index.index());
393 }
394
395 assert(std::all_of(
396 uses_map.begin(), uses_map.end(),
397 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
398
399 assert(std::all_of(
400 defs_map.begin(), defs_map.end(),
401 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
402
403 VERBOSE(TensorPlanner) << "Finish planning back-propagated tensors" << std::endl;
404}

References onert::util::ObjectManager< Index, Object >::at(), onert::ir::DUPLICATED, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::backend::train::TensorBuilder::isRegisteredBackward(), onert::util::ObjectManager< Index, Object >::iterate(), onert::backend::train::TensorBuilder::notifyBackwardFirstUse(), onert::backend::train::TensorBuilder::notifyBackwardLastUse(), onert::ir::train::TrainableGraph::operands(), onert::ir::train::TrainableGraph::operations(), onert::ir::train::TrainableGraph::trainingUseDefs(), onert::ir::UNDEFINED, and VERBOSE.
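
The pass above is reference-count liveness planning: every back-prop operand starts with its def/use counts taken from trainingUseDefs(), the first def plans an allocation, and a use count reaching zero plans the deallocation. Below is a minimal, self-contained sketch of the same counting scheme with plain ints standing in for operand indices; all names are illustrative, not onert API.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <set>
#include <unordered_map>
#include <vector>

// Toy op: which tensors it defines (writes) and which it uses (reads).
struct Op
{
  std::vector<int> defs;
  std::vector<int> uses;
};

int main()
{
  // Three ops in execution order; tensor 0 is read by the last two ops.
  std::vector<Op> order = {{{0}, {}}, {{1}, {0}}, {{}, {0, 1}}};

  // Count defs and uses up front, as the pass does while preparing scanning.
  std::unordered_map<int, uint32_t> def_count, use_count;
  for (const auto &op : order)
  {
    for (int t : op.defs)
      def_count[t]++;
    for (int t : op.uses)
      use_count[t]++;
  }

  // Tensors that have not seen their first def yet.
  std::set<int> unallocated;
  for (const auto &kv : def_count)
    unallocated.insert(kv.first);

  for (const auto &op : order)
  {
    for (int t : op.defs)
    {
      if (unallocated.erase(t) > 0) // first def: plan the allocation here
        std::cout << "notifyFirstUse(" << t << ")\n";
      assert(def_count[t] > 0);
      def_count[t]--;
    }
    for (int t : op.uses)
    {
      assert(use_count[t] > 0);
      if (--use_count[t] == 0) // last use: plan the deallocation here
        std::cout << "notifyLastUse(" << t << ")\n";
    }
  }
  return 0;
}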

◆ planDisposableBackPropTensors()

void onert::backend::train::TensorPlanner::planDisposableBackPropTensors ( TensorBuilder *  tensor_builder)

Definition at line 450 of file TensorPlanner.cc.

451{
452 VERBOSE(TensorPlanner) << "Start planning disposable back-prop tensors" << std::endl;
453
454 for (const auto &op_index : _tgraph.essentialBackwardOrder())
455 {
456 // NOTE Even if there are duplicate indices, each duplicate back-propagated tensor may
457 // need to be updated individually. So we use a sequence instead of a set.
458 const auto &inputs = _tgraph.operation(op_index).getInputs();
459 if (!(inputs == (inputs | ir::Remove::DUPLICATED)))
460 throw std::runtime_error("TensorPlanner: DisposableBackProp tensor does not support duplicate "
461 "inputs of an operation");
462
463 std::vector<DisposableTensorIndex> cur_seq;
464 const auto back_prop_indices = getOutgoingBackPropSeq(op_index, tensor_builder);
465 for (const auto &back_prop_index : back_prop_indices)
466 {
467 DisposableTensorIndex cur_index{op_index, back_prop_index};
468 if (tensor_builder->isRegisteredDisposableBackwardTensor(cur_index))
469 {
470 tensor_builder->notifyDisposableBackPropFirstUse(cur_index);
471 cur_seq.emplace_back(cur_index);
472 }
473 }
474
475 for (const auto &cur_index : cur_seq)
476 {
477 tensor_builder->notifyDisposableBackPropLastUse(cur_index);
478 }
479 }
480
481 VERBOSE(TensorPlanner) << "Finish planning disposable back-prop tensors" << std::endl;
482}

References onert::ir::DUPLICATED, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::ir::IOperation::getInputs(), onert::backend::train::TensorBuilder::isRegisteredDisposableBackwardTensor(), onert::backend::train::TensorBuilder::notifyDisposableBackPropFirstUse(), onert::backend::train::TensorBuilder::notifyDisposableBackPropLastUse(), onert::ir::train::TrainableGraph::operation(), and VERBOSE.
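
A DisposableTensorIndex pairs the operation index with the back-prop operand index, which is what confines each disposable buffer's lifetime to a single operation: first use and last use are both notified inside the loop body for that op_index. Below is a hypothetical stand-in for such a composite key, not the real onert definition.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_set>

// Hypothetical composite key: "operand's back-prop buffer as seen at op".
struct DisposableIndex
{
  uint32_t op;
  uint32_t operand;
  bool operator==(const DisposableIndex &rhs) const
  {
    return op == rhs.op && operand == rhs.operand;
  }
};

struct DisposableIndexHash
{
  std::size_t operator()(const DisposableIndex &i) const
  {
    return std::hash<uint64_t>{}((static_cast<uint64_t>(i.op) << 32) | i.operand);
  }
};

int main()
{
  std::unordered_set<DisposableIndex, DisposableIndexHash> registered = {
    {3, 7}, {3, 8}, {4, 7}}; // operand 7 has distinct buffers at op 3 and op 4

  // Per-op open/close, mirroring the pass: lifetimes never span operations.
  for (uint32_t op : {3u, 4u})
  {
    for (const auto &idx : registered)
      if (idx.op == op)
        std::cout << "firstUse(op " << idx.op << ", operand " << idx.operand << ")\n";
    for (const auto &idx : registered)
      if (idx.op == op)
        std::cout << "lastUse(op " << idx.op << ", operand " << idx.operand << ")\n";
  }
  return 0;
}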

◆ planGradientTensors()

void onert::backend::train::TensorPlanner::planGradientTensors ( TensorBuilder *  tensor_builder)

Definition at line 406 of file TensorPlanner.cc.

407{
408 VERBOSE(TensorPlanner) << "Start planning gradient tensors" << std::endl;
409
410 // TODO Use DisposableTensor instead of GradientTensor to plan them together if possible
411 // Backward layers and the corresponding GradientApplier exist in the same back-propagated
412 // operation sequence. So we can use DisposableTensors to plan GradientTensors.
413 for (const auto &op_index : _tgraph.essentialBackwardOrder())
414 {
415 std::vector<ir::train::TrainingOperandIndex> cur_seq;
416 const auto &op = _tgraph.operations().at(op_index);
417 const auto backwarding_op_index = ir::train::TrainingOperationIndex{op_index, false};
418 auto op_inputs = op.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
419
420 // Only inputs can be candidates for def of backwarding tensors
421 for (const auto &input : op_inputs)
422 {
423 if (_external_operands.contains(input))
424 continue;
425 if (!tensor_builder->isRegisteredBackward(input))
426 continue;
427
428 const auto gradient_index = ir::train::TrainingOperandIndex{input, false};
429 const auto &training_usedefs = _tgraph.trainingUseDefs();
430 const auto &usedefs = training_usedefs.at(gradient_index);
431 const auto &operand = usedefs.operand();
432 const auto &defs = usedefs.getTrainingDefs();
433 if (operand.isConstant() && defs.find(backwarding_op_index) != defs.end())
434 {
435 assert(defs.size() == 1);
436 tensor_builder->notifyBackwardFirstUse(input);
437 cur_seq.emplace_back(gradient_index);
438 }
439 }
440
441 for (const auto &operand_index : cur_seq)
442 {
443 tensor_builder->notifyBackwardLastUse(operand_index.index());
444 }
445 }
446
447 VERBOSE(TensorPlanner) << "Finish planning gradient tensors" << std::endl;
448}

References onert::util::ObjectManager< Index, Object >::at(), onert::ir::DUPLICATED, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::backend::train::TensorBuilder::isRegisteredBackward(), onert::backend::train::TensorBuilder::notifyBackwardFirstUse(), onert::backend::train::TensorBuilder::notifyBackwardLastUse(), onert::ir::train::TrainableGraph::operations(), onert::ir::train::TrainableGraph::trainingUseDefs(), onert::ir::UNDEFINED, and VERBOSE.
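
Gradients of trainable (constant) operands follow the same open/close-within-one-operation pattern: a gradient is planned at the single backward operation that defines it and released within the same sequence, where the corresponding GradientApplier would consume it. Below is a minimal sketch of the filter above, with plain ints as indices; all names are illustrative only.

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <vector>

// Per-operand facts the filter needs: is it constant (trainable), and
// which backward operations define its gradient.
struct OperandInfo
{
  bool constant;
  std::set<uint32_t> backward_defs;
};

int main()
{
  std::map<uint32_t, OperandInfo> operands = {
    {0, {true, {5}}},  // weight: its gradient is defined by backward op 5
    {1, {false, {5}}}, // activation: planned by planBackPropTensors instead
  };

  const uint32_t backward_op = 5;
  std::vector<uint32_t> cur_seq;
  for (const auto &[index, info] : operands)
  {
    // Only constant operands whose gradient is defined by this very
    // backward op qualify; the real pass asserts defs.size() == 1.
    if (info.constant && info.backward_defs.count(backward_op) > 0)
    {
      std::cout << "notifyBackwardFirstUse(" << index << ")\n";
      cur_seq.push_back(index);
    }
  }
  for (uint32_t index : cur_seq)
    std::cout << "notifyBackwardLastUse(" << index << ")\n"; // same op sequence
  return 0;
}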

◆ planLayerScopeTensors()

void onert::backend::train::TensorPlanner::planLayerScopeTensors ( TensorBuilder *  tensor_builder)

Definition at line 513 of file TensorPlanner.cc.

514{
515 VERBOSE(TensorPlanner) << "Start planning layer scope tensors" << std::endl;
516
517 // forwarding order
518 const auto f_order = _tgraph.topolSortOperations();
519 for (const auto &op_index : f_order)
520 {
521 if (not tensor_builder->isRegisteredLayerScopeTensor(op_index))
522 continue;
523
524 const auto &indices = tensor_builder->getRegisteredLayerScopeTensorIndices(op_index);
525 for (const auto &idx : indices)
526 {
527 const auto lt = tensor_builder->getLayerScopeTensorLifeTime(idx);
528 if (lt == LayerScopeTensorLifeTime::FORWARD_TO_BACKWARD)
529 tensor_builder->notifyLayerScopeFirstUse(idx);
530 }
531 }
532
533 // backwarding order
534 const auto b_order = _tgraph.essentialBackwardOrder();
535 for (const auto &op_index : b_order)
536 {
537 if (not tensor_builder->isRegisteredLayerScopeTensor(op_index))
538 continue;
539
540 const auto &indices = tensor_builder->getRegisteredLayerScopeTensorIndices(op_index);
541
542 for (const auto &idx : indices)
543 {
544 const auto lt = tensor_builder->getLayerScopeTensorLifeTime(idx);
545 if (lt == LayerScopeTensorLifeTime::BACKWARD)
546 tensor_builder->notifyLayerScopeFirstUse(idx);
547 }
548 for (const auto &idx : indices)
549 {
550 const auto lt = tensor_builder->getLayerScopeTensorLifeTime(idx);
551 if (lt == LayerScopeTensorLifeTime::FORWARD_TO_BACKWARD ||
552 lt == LayerScopeTensorLifeTime::BACKWARD)
553 tensor_builder->notifyLayerScopeLastUse(idx);
554 }
555 }
556
557 VERBOSE(TensorPlanner) << "Finish planning layer scope tensors" << std::endl;
558}

References onert::backend::train::BACKWARD, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::backend::train::FORWARD_TO_BACKWARD, onert::backend::train::TensorBuilder::getLayerScopeTensorLifeTime(), onert::backend::train::TensorBuilder::getRegisteredLayerScopeTensorIndices(), onert::backend::train::TensorBuilder::isRegisteredLayerScopeTensor(), onert::backend::train::TensorBuilder::notifyLayerScopeFirstUse(), onert::backend::train::TensorBuilder::notifyLayerScopeLastUse(), onert::ir::train::TrainableGraph::topolSortOperations(), and VERBOSE.
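
Layer-scope tensors are extra buffers a layer keeps between passes, and the pass dispatches on their LayerScopeTensorLifeTime. Judging from the enumerators referenced above, FORWARD_TO_BACKWARD tensors open during the forward pass, BACKWARD tensors open during the backward pass, and both kinds close during the backward pass. A hypothetical, self-contained mirror of that dispatch:

#include <iostream>
#include <map>

// Hypothetical lifetime enum modeled on the enumerators referenced above.
enum class LifeTime
{
  FORWARD_TO_BACKWARD, // alive from the forward pass until the backward pass
  BACKWARD             // alive only within the backward pass
};

int main()
{
  std::map<int, LifeTime> tensors = {{0, LifeTime::FORWARD_TO_BACKWARD},
                                     {1, LifeTime::BACKWARD}};

  // Forward order: only FORWARD_TO_BACKWARD tensors come alive here.
  for (const auto &[index, lt] : tensors)
    if (lt == LifeTime::FORWARD_TO_BACKWARD)
      std::cout << "firstUse(" << index << ") in forward\n";

  // Backward order: BACKWARD tensors come alive, then both kinds close.
  for (const auto &[index, lt] : tensors)
    if (lt == LifeTime::BACKWARD)
      std::cout << "firstUse(" << index << ") in backward\n";
  for (const auto &kv : tensors)
    std::cout << "lastUse(" << kv.first << ") in backward\n";
  return 0;
}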

◆ planNonConstTensors()

void onert::backend::train::TensorPlanner::planNonConstTensors ( TensorBuilder *  tensor_builder)

Definition at line 31 of file TensorPlanner.cc.

32{
33 VERBOSE(TensorPlanner) << "Start planning non-constant tensors" << std::endl;
34
35 const auto &training_usedefs = _tgraph.trainingUseDefs();
36
37 // NOTE The uses_map and defs_map must contain entries for registered tensors only
38 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> uses_map;
39 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> defs_map;
40
41 // Prepare scanning
42 // This assumes the forwarding TrainingOperandIndexes are always used
43 for (const auto &[operand_index, operand_usedefs] : training_usedefs)
44 {
45 const auto &operand = operand_usedefs.operand();
46
47 if (_external_operands.contains(operand_index.index()))
48 continue;
49
50 if (!operand_index.is_forward() || operand.isConstant())
51 continue;
52
53 uses_map[operand_index] = operand_usedefs.getTrainingUses().size();
54 defs_map[operand_index] = operand_usedefs.getTrainingDefs().size();
55 }
56
57 // Start scanning to do notify{First|Last}Use for each tensor
58 // TODO Remove this or find the reason why it is needed
59 // Q. Why is notifyFirstUse() called if an operand's def count is 0?
60 // It's neither an external operand nor a constant operand.
61 // What does it mean when the def count is 0?
62 // A. The reason has not been found yet.
63 for (const auto &[operand_index, def_count] : defs_map)
64 {
65 if (def_count == 0)
66 tensor_builder->notifyFirstUse(operand_index.index());
67 }
68
69 // This is a workaround to keep the operands alive throughout the execution
70 // (the operands look like they are unused)
71 std::vector<ir::train::TrainingOperandIndex> operands_last_until_end;
72 for (const auto &[operand_index, use_count] : uses_map)
73 {
74 if (use_count == 0)
75 operands_last_until_end.push_back(operand_index);
76 }
77
78 // Plan used or defined tensors in forwarding nodes
79 // At each operation,
80 // 1. Scan DEF of outputs. If it is the first DEF, allocate it
81 // 2. Scan DEF of inputs. If variable tensor, throw an exception (not supported yet)
82 // 3. Scan USE of inputs/outputs. Decrease the USE and deallocate if the USE is 0
83 const auto order = _tgraph.topolSortOperations();
84 for (const auto &op_index : order)
85 {
86 const auto &op = _tgraph.operations().at(op_index);
87 auto op_inputs = op.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
88 auto op_outputs = op.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
89
90 // Define outputs
91 for (const auto &output : op_outputs)
92 {
93 if (_external_operands.contains(output))
94 continue;
95 if (!tensor_builder->isRegistered(output))
96 continue;
97
98 const auto output_index = ir::train::TrainingOperandIndex{output, true};
99 assert(defs_map.find(output_index) != defs_map.end());
100 assert(defs_map.at(output_index) == 1);
101 defs_map[output_index] = 0;
102 tensor_builder->notifyFirstUse(output_index.index());
103 }
104
105 // Scan variable tensors
106 // These tensors behave like constants, but OperandInfo and LowerInfo treat them as
107 // non-constant so that memory planning here can reduce memory usage
108 // However, the train backend does not support variable tensors yet
109 for (const auto &input : op_inputs)
110 {
111 if (_external_operands.contains(input))
112 continue;
113 if (!tensor_builder->isRegistered(input))
114 continue;
115
116 const auto input_index = ir::train::TrainingOperandIndex{input, true};
117 const auto &operand = training_usedefs.at(input_index).operand();
118 if (operand.isConstant())
119 continue;
120
121 assert(training_usedefs.find(input_index) != training_usedefs.end());
122 if (operand.info().isVariable())
123 throw std::runtime_error("The train backend does not support variable tensors");
124 }
125
126 for (const auto &input : op_inputs)
127 {
128 if (_external_operands.contains(input))
129 continue;
130 if (!tensor_builder->isRegistered(input))
131 continue;
132
133 const auto input_index = ir::train::TrainingOperandIndex{input, true};
134 const auto &operand = training_usedefs.at(input_index).operand();
135 if (operand.isConstant())
136 continue;
137
138 assert(uses_map.find(input_index) != uses_map.end());
139 assert(uses_map[input_index] > 0);
140 uses_map[input_index]--;
141 if (uses_map[input_index] == 0)
142 {
143 // plan for deallocation of static tensor node
144 tensor_builder->notifyLastUse(input_index.index());
145 }
146 }
147 }
148
149 // Plan used tensors in backwarding nodes
150 const auto border = _tgraph.essentialBackwardOrder();
151 for (const auto &op_index : border)
152 {
153 const auto &op = _tgraph.operations().at(op_index);
154 auto op_inputs = op.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
155 auto op_outputs = op.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
156
157 for (const auto &index : op_inputs + op_outputs)
158 {
159 if (_external_operands.contains(index))
160 continue;
161 if (!tensor_builder->isRegistered(index))
162 continue;
163
164 const auto operand_index = ir::train::TrainingOperandIndex{index, true};
165 assert(training_usedefs.find(operand_index) != training_usedefs.end());
166 const auto &operand_usedefs = training_usedefs.at(operand_index);
167 const auto &operand = operand_usedefs.operand();
168 if (operand.isConstant())
169 continue;
170
171 const auto &training_op_index = ir::train::TrainingOperationIndex{op_index, false};
172 assert(operand_usedefs.getTrainingDefs().find(training_op_index) ==
173 operand_usedefs.getTrainingDefs().end());
174
175 const auto &uses = operand_usedefs.getTrainingUses();
176 if (uses.find(training_op_index) != uses.end())
177 {
178 assert(uses_map.find(operand_index) != uses_map.end());
179 assert(uses_map[operand_index] > 0);
180 uses_map[operand_index]--;
181 if (uses_map[operand_index] == 0)
182 {
183 // plan for deallocation of static tensor node
184 tensor_builder->notifyLastUse(operand_index.index());
185 }
186 }
187 }
188 }
189
190 for (const auto &operand_index : operands_last_until_end)
191 {
192 tensor_builder->notifyLastUse(operand_index.index());
193 }
194
195 assert(std::all_of(
196 uses_map.begin(), uses_map.end(),
197 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
198
199 assert(std::all_of(
200 defs_map.begin(), defs_map.end(),
201 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
202
203 VERBOSE(TensorPlanner) << "Finish planning non-constant tensors" << std::endl;
204}

References onert::util::ObjectManager< Index, Object >::at(), onert::ir::DUPLICATED, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::backend::train::TensorBuilder::isRegistered(), onert::backend::train::TensorBuilder::notifyFirstUse(), onert::backend::train::TensorBuilder::notifyLastUse(), onert::ir::train::TrainableGraph::operations(), onert::ir::train::TrainableGraph::topolSortOperations(), onert::ir::train::TrainableGraph::trainingUseDefs(), onert::ir::UNDEFINED, and VERBOSE.
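
planNonConstTensors counts uses and defs per TrainingOperandIndex, which carries an is_forward flag alongside the operand index; the forward tensor and its back-propagated counterpart therefore get independent lifetimes even though they refer to the same operand. Below is a hypothetical, self-contained stand-in for that keying, not the real onert type:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_map>

// Hypothetical stand-in for ir::train::TrainingOperandIndex: the same
// operand is planned under two distinct keys, one for its forward tensor
// and one for its back-propagated counterpart.
struct TrainingKey
{
  uint32_t operand;
  bool forward;
  bool operator==(const TrainingKey &rhs) const
  {
    return operand == rhs.operand && forward == rhs.forward;
  }
};

struct TrainingKeyHash
{
  std::size_t operator()(const TrainingKey &k) const
  {
    return std::hash<uint32_t>{}(k.operand * 2u + (k.forward ? 1u : 0u));
  }
};

int main()
{
  std::unordered_map<TrainingKey, uint32_t, TrainingKeyHash> uses_map;
  uses_map[{7, true}] = 3;  // forward tensor of operand 7: 3 uses left
  uses_map[{7, false}] = 1; // its back-prop tensor: counted independently

  // Two entries: the forward and backward tensors of the same operand are
  // tracked (and can be freed) independently.
  std::cout << uses_map.size() << " planned lifetimes\n"; // prints 2
  return 0;
}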

◆ planTrainableTensors()

void onert::backend::train::TensorPlanner::planTrainableTensors ( TensorBuilder *  tensor_builder)

Definition at line 206 of file TensorPlanner.cc.

207{
208 VERBOSE(TensorPlanner) << "Start planning constant tensors" << std::endl;
209
210 const auto &training_usedefs = _tgraph.trainingUseDefs();
211
212 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> uses_map;
213 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> defs_map;
214 std::vector<ir::train::TrainingOperandIndex> constants;
215
216 // Prepare scanning
217 for (const auto &[operand_index, operand_usedefs] : training_usedefs)
218 {
219 const auto &operand = operand_usedefs.operand();
220
221 if (!operand_index.valid())
222 continue;
223
224 if (operand.isConstant() && operand_index.is_forward())
225 {
226 uses_map[operand_index] = 0;
227 const auto &defs = operand_usedefs.getTrainingDefs();
228 defs_map[operand_index] = defs.size(); // always 0: constants have no training defs
229 constants.emplace_back(operand_index);
230 }
231 }
232
233 // Start scanning to do notify{First|Last}Use for each tensor
234 // If a tensor is a constant, increase the use of the tensor and allocate it first.
235 // Increasing the use count here means the tensor is never deallocated during
236 // execution, i.e., it will be deallocated last.
237 for (const auto &index : constants)
238 {
239 assert(index.is_forward());
240 if (tensor_builder->isRegistered(index.index()))
241 {
242 uses_map[index]++;
243 tensor_builder->notifyFirstUse(index.index());
244 }
245 }
246
247 // Dispose and validate
248 for (const auto &index : constants)
249 {
250 assert(index.is_forward());
251 if (tensor_builder->isRegistered(index.index()))
252 {
253 uses_map[index]--;
254 tensor_builder->notifyLastUse(index.index());
255 }
256 }
257
258 assert(std::all_of(
259 uses_map.begin(), uses_map.end(),
260 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
261
262 assert(std::all_of(
263 defs_map.begin(), defs_map.end(),
264 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
265
266 VERBOSE(TensorPlanner) << "Finish planning constant tensors" << std::endl;
267}

References onert::backend::train::TensorBuilder::isRegistered(), onert::backend::train::TensorBuilder::notifyFirstUse(), onert::backend::train::TensorBuilder::notifyLastUse(), onert::ir::train::TrainableGraph::trainingUseDefs(), and VERBOSE.
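
The trick in this pass is the artificial extra use: each registered constant's use count is bumped right before notifyFirstUse(), so constants are planned at the very start, and since that pinned use is only released in the final loop, they are freed last. A minimal sketch of the pin/unpin pattern, with illustrative names:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
  // Use counts of two registered constant (trainable) tensors; constants
  // have no training defs, so only the artificial use below keeps them alive.
  std::vector<uint32_t> use_count = {0, 0};

  for (std::size_t i = 0; i < use_count.size(); ++i)
  {
    use_count[i]++; // pin: this use is never consumed during execution
    std::cout << "notifyFirstUse(const " << i << ")\n";
  }

  // ... the whole forward/backward execution would run here ...

  for (std::size_t i = 0; i < use_count.size(); ++i)
  {
    use_count[i]--; // unpin at the very end
    assert(use_count[i] == 0);
    std::cout << "notifyLastUse(const " << i << ")\n";
  }
  return 0;
}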


The documentation for this class was generated from the following files:
TensorPlanner.h
TensorPlanner.cc