ONE - On-device Neural Engine
onert::backend::train::TensorPlanner Class Reference

#include <TensorPlanner.h>

Public Member Functions

 TensorPlanner (const ir::train::TrainableGraph &tgraph, const util::Set< ir::OperandIndex > &external_operands)
 
 TensorPlanner (const TensorPlanner &)=delete
 
 TensorPlanner (TensorPlanner &&)=delete
 
TensorPlanner & operator= (const TensorPlanner &)=delete
 
TensorPlanner & operator= (TensorPlanner &&)=delete
 
 ~TensorPlanner ()=default
 
void planNonConstTensors (TensorBuilder *tensor_builder)
 
void planTrainableTensors (TensorBuilder *tensor_builder)
 
void planBackPropTensors (TensorBuilder *tensor_builder)
 
void planGradientTensors (TensorBuilder *tensor_builder)
 
void planDisposableBackPropTensors (TensorBuilder *tensor_builder)
 
void planLayerScopeTensors (TensorBuilder *tensor_builder)
 

Detailed Description

Definition at line 32 of file TensorPlanner.h.

Constructor & Destructor Documentation

◆ TensorPlanner() [1/3]

onert::backend::train::TensorPlanner::TensorPlanner ( const ir::train::TrainableGraph &  tgraph,
const util::Set< ir::OperandIndex > &  external_operands 
)

Definition at line 28 of file TensorPlanner.cc.

30 : _tgraph{tgraph}, _external_operands{external_operands}
31{
32 // DO NOTHING
33}
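TensorPlanner is neither copyable nor movable, so a backend constructs it once per graph and drives planning through the plan* methods below. A minimal driver sketch; the helper function and the call order are illustrative assumptions, not a documented contract of the train backend:

void planAllTensors(const ir::train::TrainableGraph &tgraph,
                    const util::Set<ir::OperandIndex> &external_operands,
                    TensorBuilder *tensor_builder)
{
  // Hypothetical helper: plan every tensor category in one pass.
  TensorPlanner planner{tgraph, external_operands};
  planner.planNonConstTensors(tensor_builder);
  planner.planTrainableTensors(tensor_builder);
  planner.planBackPropTensors(tensor_builder);
  planner.planGradientTensors(tensor_builder);
  planner.planDisposableBackPropTensors(tensor_builder);
  planner.planLayerScopeTensors(tensor_builder);
}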

◆ TensorPlanner() [2/3]

onert::backend::train::TensorPlanner::TensorPlanner ( const TensorPlanner &  )
delete

◆ TensorPlanner() [3/3]

onert::backend::train::TensorPlanner::TensorPlanner ( TensorPlanner &&  )
delete

◆ ~TensorPlanner()

onert::backend::train::TensorPlanner::~TensorPlanner ( )
default

Member Function Documentation

◆ operator=() [1/2]

TensorPlanner & onert::backend::train::TensorPlanner::operator= ( const TensorPlanner &  )
delete

◆ operator=() [2/2]

TensorPlanner & onert::backend::train::TensorPlanner::operator= ( TensorPlanner &&  )
delete

◆ planBackPropTensors()

void onert::backend::train::TensorPlanner::planBackPropTensors ( TensorBuilder *  tensor_builder)

Definition at line 275 of file TensorPlanner.cc.

276{
277 VERBOSE(TensorPlanner) << "Start planning back-propagated tensors" << std::endl;
278
279 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> uses_map;
280 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> defs_map;
281
282 // Prepare scanning
283 const auto &training_usedefs = _tgraph.trainingUseDefs();
284 for (const auto &[operand_index, operand_usedefs] : training_usedefs)
285 {
286 const auto &operand = operand_usedefs.operand();
287
288 if (_external_operands.contains(operand_index.index()))
289 continue;
290
291 if (!tensor_builder->isRegisteredBackward(operand_index.index()))
292 continue;
293
294 if (operand_index.is_forward() || operand.isConstant())
295 continue;
296
297 uses_map[operand_index] = operand_usedefs.getTrainingUses().size();
298 defs_map[operand_index] = operand_usedefs.getTrainingDefs().size();
299 }
300
301 // Start scanning to do notify{First|Last}Use for each tensor
302
 303 // This is a workaround to keep the operands alive over the whole execution
 304 // (these operands look like they are unused)
305 std::vector<ir::train::TrainingOperandIndex> operands_last_until_end;
306 for (const auto &[ind, use_count] : uses_map)
307 {
308 if (use_count == 0)
309 operands_last_until_end.push_back(ind);
310 }
311
312 // At each operation,
 313 // 1. Scan DEF of outgoing tensors. If the first DEF, allocate it
314 // 2. Scan DEF of inputs. If variable tensor, throw an exception (not supported yet)
315 // 3. Scan USE of incoming tensors. Decrease the USE and deallocate if the USE is 0
316 std::set<ir::OperandIndex> unallocated;
317 _tgraph.operands().iterate(
318 [&](const ir::OperandIndex &index, const ir::Operand &) { unallocated.insert(index); });
319
320 const auto border = _tgraph.essentialBackwardOrder();
321 for (const auto &op_ind : border)
322 {
323 const auto &op = _tgraph.operations().at(op_ind);
324 auto op_inputs = op.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
325 auto op_outputs = op.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
326
 327 // Allocate back-propagated tensors on their first def
328 for (const auto &outgoing : op_inputs)
329 {
330 const auto operand_index = ir::train::TrainingOperandIndex{outgoing, false};
331 const auto &operand = _tgraph.operands().at(outgoing);
332 if (_external_operands.contains(outgoing))
333 continue;
334 if (!tensor_builder->isRegisteredBackward(outgoing))
335 continue;
336 if (operand.isConstant())
337 continue;
338
339 if (defs_map.find(operand_index) != defs_map.end())
340 {
341 if (unallocated.find(outgoing) != unallocated.end())
342 {
343 // First Def
344 unallocated.erase(outgoing);
345 defs_map[operand_index]--;
346 tensor_builder->notifyBackwardFirstUse(outgoing);
347 }
348 else
349 {
350 assert(defs_map[operand_index] > 0);
351 defs_map[operand_index]--;
352 }
353 }
354 }
355
356 // Scan variable tensors
 357 // These tensors behave like constants, but OperandInfo and LowerInfo treat them as
 358 // non-constant so that memory planning here can reduce memory usage.
 359 // However, the train backend does not support variable tensors yet.
360 for (const auto &outgoing : op_inputs)
361 {
362 if (_external_operands.contains(outgoing))
363 continue;
364 if (!tensor_builder->isRegisteredBackward(outgoing))
365 continue;
366 const auto &operand = _tgraph.operands().at(outgoing);
367 if (operand.info().isVariable())
368 throw std::runtime_error("The train backend does not support variable tensors");
369 }
370
371 for (const auto &incoming : op_outputs)
372 {
373 const auto incoming_index = ir::train::TrainingOperandIndex{incoming, false};
374
375 if (_external_operands.contains(incoming))
376 continue;
377 if (!tensor_builder->isRegisteredBackward(incoming))
378 continue;
379
 380 // NOTE An op's incoming tensors always have a corresponding op def
381 assert(defs_map.find(incoming_index) != defs_map.end());
382
383 if (uses_map.find(incoming_index) != uses_map.end())
384 {
385 assert(uses_map[incoming_index] > 0);
386 uses_map[incoming_index]--;
387 if (uses_map[incoming_index] == 0)
388 {
 389 // plan for deallocation of static tensor node
390 tensor_builder->notifyBackwardLastUse(incoming);
391 }
392 }
393 }
394 }
395
396 for (const auto &index : operands_last_until_end)
397 {
398 tensor_builder->notifyBackwardLastUse(index.index());
399 }
400
401 assert(std::all_of(
402 uses_map.begin(), uses_map.end(),
403 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
404
405 assert(std::all_of(
406 defs_map.begin(), defs_map.end(),
407 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
408
409 VERBOSE(TensorPlanner) << "Finish planning back-propagated tensors" << std::endl;
410}

References onert::util::ObjectManager< Index, Object >::at(), onert::ir::DUPLICATED, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::backend::train::TensorBuilder::isRegisteredBackward(), onert::util::ObjectManager< Index, Object >::iterate(), onert::backend::train::TensorBuilder::notifyBackwardFirstUse(), onert::backend::train::TensorBuilder::notifyBackwardLastUse(), onert::ir::train::TrainableGraph::operands(), onert::ir::train::TrainableGraph::operations(), onert::ir::train::TrainableGraph::trainingUseDefs(), onert::ir::UNDEFINED, and VERBOSE.
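The method above is an instance of use/def-count liveness planning: the first DEF of a back-propagated tensor marks its allocation point (notifyBackwardFirstUse) and the USE count dropping to zero marks its deallocation point (notifyBackwardLastUse). Because the direction is reversed, an op's inputs carry the back-prop DEFs and its outputs the USEs. A self-contained sketch of the counting scheme, with stand-in types rather than onert APIs:

#include <cstdint>
#include <unordered_map>
#include <vector>

// Stand-in for the TensorBuilder notifications used above.
struct BuilderSketch
{
  void notifyFirstUse(int) { /* plan allocation */ }
  void notifyLastUse(int) { /* plan deallocation */ }
};

struct OpSketch
{
  std::vector<int> defs; // tensors this op defines (for back-prop: its inputs)
  std::vector<int> uses; // tensors this op consumes (for back-prop: its outputs)
};

// use_count must be pre-filled with each tensor's total use count, exactly
// like the uses_map prepared from trainingUseDefs() in the listing above.
void planLiveness(const std::vector<OpSketch> &ordered_ops,
                  std::unordered_map<int, uint32_t> use_count,
                  BuilderSketch &builder)
{
  std::unordered_map<int, bool> defined;
  for (const auto &op : ordered_ops)
  {
    for (int t : op.defs)
    {
      if (!defined[t]) // first def: allocate
      {
        defined[t] = true;
        builder.notifyFirstUse(t);
      }
    }
    for (int t : op.uses)
    {
      if (--use_count[t] == 0) // last use: deallocate
        builder.notifyLastUse(t);
    }
  }
}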

◆ planDisposableBackPropTensors()

void onert::backend::train::TensorPlanner::planDisposableBackPropTensors ( TensorBuilder *  tensor_builder)

Definition at line 456 of file TensorPlanner.cc.

457{
458 VERBOSE(TensorPlanner) << "Start planning disposable back-prop tensors" << std::endl;
459
460 for (const auto &op_index : _tgraph.essentialBackwardOrder())
461 {
462 // NOTE Even if there are duplicate indices, the duplicate back-propagated tensors may need
463 // to be updated respectively. So we use a sequence instead of a set.
464 const auto &inputs = _tgraph.operation(op_index).getInputs();
465 if (!(inputs == (inputs | ir::Remove::DUPLICATED)))
 466 throw std::runtime_error("TensorPlanner: DisposableBackProp tensor does not support duplicate "
467 "inputs of an operation");
468
469 std::vector<DisposableTensorIndex> cur_seq;
470 const auto back_prop_indices = getOutgoingBackPropSeq(op_index, tensor_builder);
471 for (const auto &back_prop_index : back_prop_indices)
472 {
473 DisposableTensorIndex cur_index{op_index, back_prop_index};
474 if (tensor_builder->isRegisteredDisposableBackwardTensor(cur_index))
475 {
476 tensor_builder->notifyDisposableBackPropFirstUse(cur_index);
477 cur_seq.emplace_back(cur_index);
478 }
479 }
480
481 for (const auto &cur_index : cur_seq)
482 {
483 tensor_builder->notifyDisposableBackPropLastUse(cur_index);
484 }
485 }
486
487 VERBOSE(TensorPlanner) << "Finish planning disposable back-prop tensors" << std::endl;
488}

References onert::ir::DUPLICATED, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::ir::IOperation::getInputs(), onert::backend::train::TensorBuilder::isRegisteredDisposableBackwardTensor(), onert::backend::train::TensorBuilder::notifyDisposableBackPropFirstUse(), onert::backend::train::TensorBuilder::notifyDisposableBackPropLastUse(), onert::ir::train::TrainableGraph::operation(), and VERBOSE.
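Each disposable back-prop tensor is keyed by an (operation, operand) pair and lives only while its single backward operation executes, which lets the memory planner reuse its buffer from one operation to the next. A toy sketch of that per-operation lifetime; the types are stand-ins, not the onert definitions:

#include <cstdint>
#include <vector>

// Stand-ins for DisposableTensorIndex and the TensorBuilder notifications.
struct DisposableIndexSketch
{
  uint32_t op;      // backward operation index
  uint32_t operand; // back-propagated operand index
};

struct DisposableBuilderSketch
{
  void notifyFirstUse(const DisposableIndexSketch &) {}
  void notifyLastUse(const DisposableIndexSketch &) {}
};

// Plan one backward operation: every outgoing back-prop tensor is opened and
// closed within this single call, so its buffer is reusable afterwards.
void planOneBackwardOp(uint32_t op_index, const std::vector<uint32_t> &back_props,
                       DisposableBuilderSketch &builder)
{
  std::vector<DisposableIndexSketch> cur_seq;
  for (auto operand : back_props)
  {
    DisposableIndexSketch idx{op_index, operand};
    builder.notifyFirstUse(idx); // allocate for this op's backward computation
    cur_seq.push_back(idx);
  }
  for (const auto &idx : cur_seq)
    builder.notifyLastUse(idx); // release before the next op is planned
}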

◆ planGradientTensors()

void onert::backend::train::TensorPlanner::planGradientTensors ( TensorBuilder *  tensor_builder)

Definition at line 412 of file TensorPlanner.cc.

413{
414 VERBOSE(TensorPlanner) << "Start planning gradient tensors" << std::endl;
415
416 // TODO Use DisposableTensor instead of GradientTensor to plan them together if possible
417 // Backward layers and the corresponding GradientApplier exist in the same back-propagated
418 // operation sequence. So we can use DisposableTensors to plan GradientTensors.
419 for (const auto &op_index : _tgraph.essentialBackwardOrder())
420 {
421 std::vector<ir::train::TrainingOperandIndex> cur_seq;
422 const auto &op = _tgraph.operations().at(op_index);
423 const auto backwarding_op_index = ir::train::TrainingOperationIndex{op_index, false};
424 auto op_inputs = op.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
425
426 // Only inputs can be candidates for def of backwarding tensors
427 for (const auto &input : op_inputs)
428 {
429 if (_external_operands.contains(input))
430 continue;
431 if (!tensor_builder->isRegisteredBackward(input))
432 continue;
433
434 const auto gradient_index = ir::train::TrainingOperandIndex{input, false};
435 const auto &training_usedefs = _tgraph.trainingUseDefs();
436 const auto &usedefs = training_usedefs.at(gradient_index);
437 const auto &operand = usedefs.operand();
438 const auto &defs = usedefs.getTrainingDefs();
439 if (operand.isConstant() && defs.find(backwarding_op_index) != defs.end())
440 {
441 assert(defs.size() == 1);
442 tensor_builder->notifyBackwardFirstUse(input);
443 cur_seq.emplace_back(gradient_index);
444 }
445 }
446
447 for (const auto &operand_index : cur_seq)
448 {
449 tensor_builder->notifyBackwardLastUse(operand_index.index());
450 }
451 }
452
453 VERBOSE(TensorPlanner) << "Finish planning gradient tensors" << std::endl;
454}

References onert::util::ObjectManager< Index, Object >::at(), onert::ir::DUPLICATED, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::backend::train::TensorBuilder::isRegisteredBackward(), onert::backend::train::TensorBuilder::notifyBackwardFirstUse(), onert::backend::train::TensorBuilder::notifyBackwardLastUse(), onert::ir::train::TrainableGraph::operations(), onert::ir::train::TrainableGraph::trainingUseDefs(), onert::ir::UNDEFINED, and VERBOSE.
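What distinguishes a gradient from the weight it belongs to is the forward/backward flag on TrainingOperandIndex: the gradient of a constant (trainable) operand is the operand's backward incarnation, and its only def is the backward incarnation of the consuming operation, so its lifetime opens and closes within that one backward op. A toy restatement of the convention (not the onert definition):

#include <cstdint>

// Stand-in for ir::train::TrainingOperandIndex: an operand index plus a flag
// telling whether it names the forward tensor or its back-prop counterpart.
struct TrainingOperandIndexSketch
{
  uint32_t index;  // underlying operand index value
  bool is_forward; // true: forward tensor, false: gradient/back-prop tensor
};

// For a trainable weight W consumed by operation K:
//   forward weight : {W, true}
//   gradient of W  : {W, false}, defined only by the backward op of K,
// so planGradientTensors notifies first and last use of {W, false} while
// planning that single backward operation.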

◆ planLayerScopeTensors()

void onert::backend::train::TensorPlanner::planLayerScopeTensors ( TensorBuilder *  tensor_builder)

Definition at line 519 of file TensorPlanner.cc.

520{
521 VERBOSE(TensorPlanner) << "Start planning layer scope tensors" << std::endl;
522
 523 // forwarding order
524 const auto f_order = _tgraph.topolSortOperations();
525 for (const auto &op_index : f_order)
526 {
527 if (not tensor_builder->isRegisteredLayerScopeTensor(op_index))
528 continue;
529
530 const auto &indices = tensor_builder->getRegisteredLayerScopeTensorIndices(op_index);
531 for (const auto &idx : indices)
532 {
 533 const auto lt = tensor_builder->getLayerScopeTensorLifeTime(idx);
 534 if (lt == LayerScopeTensorLifeTime::FORWARD_TO_BACKWARD)
 535 tensor_builder->notifyLayerScopeFirstUse(idx);
536 }
537 }
538
539 // backwarding order
540 const auto b_order = _tgraph.essentialBackwardOrder();
541 for (const auto &op_index : b_order)
542 {
543 if (not tensor_builder->isRegisteredLayerScopeTensor(op_index))
544 continue;
545
546 const auto &indices = tensor_builder->getRegisteredLayerScopeTensorIndices(op_index);
547
548 for (const auto &idx : indices)
549 {
 550 const auto lt = tensor_builder->getLayerScopeTensorLifeTime(idx);
 551 if (lt == LayerScopeTensorLifeTime::BACKWARD)
 552 tensor_builder->notifyLayerScopeFirstUse(idx);
553 }
554 for (const auto &idx : indices)
555 {
 556 const auto lt = tensor_builder->getLayerScopeTensorLifeTime(idx);
 557 if (lt == LayerScopeTensorLifeTime::FORWARD_TO_BACKWARD ||
 558 lt == LayerScopeTensorLifeTime::BACKWARD)
 559 tensor_builder->notifyLayerScopeLastUse(idx);
560 }
561 }
562
 563 VERBOSE(TensorPlanner) << "Finish planning layer scope tensors" << std::endl;
564}

References onert::backend::train::BACKWARD, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::backend::train::FORWARD_TO_BACKWARD, onert::backend::train::TensorBuilder::getLayerScopeTensorLifeTime(), onert::backend::train::TensorBuilder::getRegisteredLayerScopeTensorIndices(), onert::backend::train::TensorBuilder::isRegisteredLayerScopeTensor(), onert::backend::train::TensorBuilder::notifyLayerScopeFirstUse(), onert::backend::train::TensorBuilder::notifyLayerScopeLastUse(), onert::ir::train::TrainableGraph::topolSortOperations(), and VERBOSE.
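The branches elided from the listing above dispatch on the lifetime returned by getLayerScopeTensorLifeTime(): FORWARD_TO_BACKWARD tensors come alive during the forward scan and are released during the backward scan, while BACKWARD tensors both start and end inside the backward scan. A stand-alone sketch of that dispatch; the enumerator names match the references above, everything else is a stand-in:

#include <cstdint>

enum class LifeTimeSketch
{
  FORWARD_TO_BACKWARD, // allocated while forwarding, freed while backwarding
  BACKWARD             // allocated and freed while backwarding
};

struct LayerScopeBuilderSketch
{
  void notifyLayerScopeFirstUse(uint32_t) {}
  void notifyLayerScopeLastUse(uint32_t) {}
};

void planLayerScopeTensor(LifeTimeSketch lt, bool forwarding,
                          LayerScopeBuilderSketch &builder, uint32_t idx)
{
  if (forwarding)
  {
    if (lt == LifeTimeSketch::FORWARD_TO_BACKWARD)
      builder.notifyLayerScopeFirstUse(idx); // alive from the forward pass on
  }
  else
  {
    if (lt == LifeTimeSketch::BACKWARD)
      builder.notifyLayerScopeFirstUse(idx); // backward-only tensors start here
    if (lt == LifeTimeSketch::FORWARD_TO_BACKWARD || lt == LifeTimeSketch::BACKWARD)
      builder.notifyLayerScopeLastUse(idx); // both kinds end while backwarding
  }
}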

◆ planNonConstTensors()

void onert::backend::train::TensorPlanner::planNonConstTensors ( TensorBuilder *  tensor_builder)

Definition at line 35 of file TensorPlanner.cc.

36{
37 VERBOSE(TensorPlanner) << "Start planning non-constant tensors" << std::endl;
38
39 const auto &training_usedefs = _tgraph.trainingUseDefs();
40
41 // NOTE The uses_map and defs_map must have the size of only registered tensors
42 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> uses_map;
43 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> defs_map;
44
45 // Prepare scanning
 46 // This assumes TrainingOperationIndexes in forwarding are always used
47 for (const auto &[operand_index, operand_usedefs] : training_usedefs)
48 {
49 const auto &operand = operand_usedefs.operand();
50
51 if (_external_operands.contains(operand_index.index()))
52 continue;
53
54 if (!operand_index.is_forward() || operand.isConstant())
55 continue;
56
57 uses_map[operand_index] = operand_usedefs.getTrainingUses().size();
58 defs_map[operand_index] = operand_usedefs.getTrainingDefs().size();
59 }
60
61 // Start scanning to do notify{First|Last}Use for each tensor
 62 // TODO Remove this or find the reason why it is needed
 63 // Q. Why is notifyFirstUse() called if an operand's def count is 0?
 64 //    It is neither an external operand nor a constant operand.
 65 //    What does it mean when the def count is 0?
 66 // A. The reason has not been found yet.
67 for (const auto &[operand_index, def_count] : defs_map)
68 {
69 if (def_count == 0)
70 tensor_builder->notifyFirstUse(operand_index.index());
71 }
72
 73 // This is a workaround to keep the operands alive over the whole execution
 74 // (these operands look like they are unused)
75 std::vector<ir::train::TrainingOperandIndex> operands_last_until_end;
76 for (const auto &[operand_index, use_count] : uses_map)
77 {
78 if (use_count == 0)
79 operands_last_until_end.push_back(operand_index);
80 }
81
82 // Plan used or defined tensors in forwarding nodes
83 // At each operation,
 84 // 1. Scan DEF of outputs. If the first DEF, allocate it
85 // 2. Scan DEF of inputs. If variable tensor, throw an exception (not supported yet)
86 // 3. Scan USE of inputs/outputs. Decrease the USE and deallocate if the USE is 0
87 const auto order = _tgraph.topolSortOperations();
88 for (const auto &op_index : order)
89 {
90 const auto &op = _tgraph.operations().at(op_index);
91 auto op_inputs = op.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
92 auto op_outputs = op.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
93
94 // Define outputs
95 for (const auto &output : op_outputs)
96 {
97 if (_external_operands.contains(output))
98 continue;
99 if (!tensor_builder->isRegistered(output))
100 continue;
101
102 const auto output_index = ir::train::TrainingOperandIndex{output, true};
103 assert(defs_map.find(output_index) != defs_map.end());
104 assert(defs_map.at(output_index) == 1);
105 defs_map[output_index] = 0;
106 tensor_builder->notifyFirstUse(output_index.index());
107 }
108
109 // Scan variable tensors
 110 // These tensors behave like constants, but OperandInfo and LowerInfo treat them as
 111 // non-constant so that memory planning here can reduce memory usage.
 112 // However, the train backend does not support variable tensors yet.
113 for (const auto &input : op_inputs)
114 {
115 if (_external_operands.contains(input))
116 continue;
117 if (!tensor_builder->isRegistered(input))
118 continue;
 119
 120 const auto input_index = ir::train::TrainingOperandIndex{input, true};
 121 const auto &operand = training_usedefs.at(input_index).operand();
122 if (operand.isConstant())
123 continue;
124
125 assert(training_usedefs.find(input_index) != training_usedefs.end());
126 if (operand.info().isVariable())
127 throw std::runtime_error("The train backend does not support variable tensors");
128 }
129
130 for (const auto &input : op_inputs)
131 {
132 if (_external_operands.contains(input))
133 continue;
134 if (!tensor_builder->isRegistered(input))
135 continue;
 136
 137 const auto input_index = ir::train::TrainingOperandIndex{input, true};
 138 const auto &operand = training_usedefs.at(input_index).operand();
139 if (operand.isConstant())
140 continue;
141
142 assert(uses_map.find(input_index) != uses_map.end());
143 assert(uses_map[input_index] > 0);
144 uses_map[input_index]--;
145 if (uses_map[input_index] == 0)
146 {
147 // plan for deallocation of static tensor node
148 tensor_builder->notifyLastUse(input_index.index());
149 }
150 }
151 }
152
153 // Plan used tensors in backwarding nodes
154 const auto border = _tgraph.essentialBackwardOrder();
155 for (const auto &op_index : border)
156 {
157 const auto &op = _tgraph.operations().at(op_index);
158 auto op_inputs = op.getInputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
159 auto op_outputs = op.getOutputs() | ir::Remove::DUPLICATED | ir::Remove::UNDEFINED;
160
161 for (const auto &index : op_inputs + op_outputs)
162 {
163 if (_external_operands.contains(index))
164 continue;
165 if (!tensor_builder->isRegistered(index))
166 continue;
167
168 const auto operand_index = ir::train::TrainingOperandIndex{index, true};
169 assert(training_usedefs.find(operand_index) != training_usedefs.end());
170 const auto &operand_usedefs = training_usedefs.at(operand_index);
171 const auto &operand = operand_usedefs.operand();
172 if (operand.isConstant())
173 continue;
174
175 const auto &training_op_index = ir::train::TrainingOperationIndex{op_index, false};
176 assert(operand_usedefs.getTrainingDefs().find(training_op_index) ==
177 operand_usedefs.getTrainingDefs().end());
178
179 const auto &uses = operand_usedefs.getTrainingUses();
180 if (uses.find(training_op_index) != uses.end())
181 {
182 assert(uses_map.find(operand_index) != uses_map.end());
183 assert(uses_map[operand_index] > 0);
184 uses_map[operand_index]--;
185 if (uses_map[operand_index] == 0)
186 {
187 // plan for deallocation of static tensor node
188 tensor_builder->notifyLastUse(operand_index.index());
189 }
190 }
191 }
192 }
193
194 for (const auto &operand_index : operands_last_until_end)
195 {
196 tensor_builder->notifyLastUse(operand_index.index());
197 }
198
199 assert(std::all_of(
200 uses_map.begin(), uses_map.end(),
201 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
202
203 assert(std::all_of(
204 defs_map.begin(), defs_map.end(),
205 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
206
207 VERBOSE(TensorPlanner) << "Finish planning non-constant tensors" << std::endl;
208}

References onert::util::ObjectManager< Index, Object >::at(), onert::ir::DUPLICATED, onert::ir::train::TrainableGraph::essentialBackwardOrder(), onert::backend::train::TensorBuilder::isRegistered(), onert::backend::train::TensorBuilder::notifyFirstUse(), onert::backend::train::TensorBuilder::notifyLastUse(), onert::ir::train::TrainableGraph::operations(), onert::ir::train::TrainableGraph::topolSortOperations(), onert::ir::train::TrainableGraph::trainingUseDefs(), onert::ir::UNDEFINED, and VERBOSE.
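To make the two-phase scan concrete, here is a hypothetical micro-trace on a two-operation forward chain x --[Op0]--> y --[Op1]--> z, where all three operands are registered, non-constant, and non-external, and x is additionally consumed by Op0's backward operation (indices and counts are made up for illustration):

Prepare       : uses_map = {x:2, y:1, z:0}, defs_map = {y:1, z:1}
Forward Op0   : def y -> notifyFirstUse(y); use x -> uses_map[x] 2->1
Forward Op1   : def z -> notifyFirstUse(z); use y -> uses_map[y] 1->0 -> notifyLastUse(y)
Backward Op1' : no remaining forward-tensor uses in this example
Backward Op0' : use x -> uses_map[x] 1->0 -> notifyLastUse(x)
Epilogue      : z started with zero uses, so operands_last_until_end releases it -> notifyLastUse(z)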

◆ planTrainableTensors()

void onert::backend::train::TensorPlanner::planTrainableTensors ( TensorBuilder *  tensor_builder)

Definition at line 210 of file TensorPlanner.cc.

211{
212 VERBOSE(TensorPlanner) << "Start planning constant tensors" << std::endl;
213
214 const auto &training_usedefs = _tgraph.trainingUseDefs();
215
216 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> uses_map;
217 std::unordered_map<ir::train::TrainingOperandIndex, uint32_t> defs_map;
218 std::vector<ir::train::TrainingOperandIndex> constants;
219
220 // Prepare scanning
221 for (const auto &pair : training_usedefs)
222 {
223 const auto &operand_index = pair.first;
224 const auto &operand_usedefs = pair.second;
225 const auto &operand = operand_usedefs.operand();
226
227 if (!operand_index.valid())
228 continue;
229
230 if (operand.isConstant() && operand_index.is_forward())
231 {
232 uses_map[operand_index] = 0;
233 const auto &defs = operand_usedefs.getTrainingDefs();
 234 defs_map[operand_index] = defs.size(); // A constant has no training defs, so this is 0
235 constants.emplace_back(operand_index);
236 }
237 }
238
239 // Start scanning to do notify{First|Last}Use for each tensor
 240 // If a tensor is a constant, increase its use count and allocate it first.
 241 // Increasing the use count here makes such tensors never be deallocated, i.e., they
 242 // will be deallocated last.
243 for (const auto &index : constants)
244 {
245 assert(index.is_forward());
246 if (tensor_builder->isRegistered(index.index()))
247 {
248 uses_map[index]++;
249 tensor_builder->notifyFirstUse(index.index());
250 }
251 }
252
253 // Dispose and validate
254 for (const auto &index : constants)
255 {
256 assert(index.is_forward());
257 if (tensor_builder->isRegistered(index.index()))
258 {
259 uses_map[index]--;
260 tensor_builder->notifyLastUse(index.index());
261 }
262 }
263
264 assert(std::all_of(
265 uses_map.begin(), uses_map.end(),
266 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
267
268 assert(std::all_of(
269 defs_map.begin(), defs_map.end(),
270 [](std::pair<const ir::train::TrainingOperandIndex, uint32_t> it) { return it.second == 0; }));
271
272 VERBOSE(TensorPlanner) << "Finish planning constant tensors" << std::endl;
273}

References onert::backend::train::TensorBuilder::isRegistered(), onert::backend::train::TensorBuilder::notifyFirstUse(), onert::backend::train::TensorBuilder::notifyLastUse(), onert::ir::train::TrainableGraph::trainingUseDefs(), and VERBOSE.
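The increment/decrement around notifyFirstUse/notifyLastUse is a pinning trick: every constant gets one artificial use, so its count can never reach zero while anything else is being planned; all constants' lifetimes therefore overlap and they are released only in the final pass. A self-contained sketch with stand-in types, not onert APIs:

#include <cstdint>
#include <unordered_map>
#include <vector>

struct BuilderSketch
{
  void notifyFirstUse(uint32_t) {}
  void notifyLastUse(uint32_t) {}
};

void pinConstants(const std::vector<uint32_t> &constants,
                  std::unordered_map<uint32_t, uint32_t> &uses_map,
                  BuilderSketch &builder)
{
  // Allocate every registered constant up front and add one artificial use,
  // so op-by-op scanning could never drop the count to zero.
  for (auto idx : constants)
  {
    uses_map[idx]++;
    builder.notifyFirstUse(idx);
  }
  // Release the artificial use last: constants are deallocated after every
  // real consumer has been planned, matching the "dispose and validate" pass.
  for (auto idx : constants)
  {
    uses_map[idx]--;
    builder.notifyLastUse(idx);
  }
}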


The documentation for this class was generated from the following files:

TensorPlanner.h
TensorPlanner.cc