ONE - On-device Neural Engine
TensorBuilder.cc
/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TensorBuilder.h"

#include "Tensor.h"

namespace onert
{
namespace backend
{
namespace train
{

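// The builder shares a TensorRegistry with the rest of the backend and owns a
// TensorManager sized by the optimizer's variable count, i.e. the number of
// extra per-parameter tensors the optimizer keeps (e.g. two for Adam's moments).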
TensorBuilder::TensorBuilder(const std::shared_ptr<TensorRegistry> &tensor_reg,
                             const exec::train::optimizer::Optimizer *optimizer)
  : _tensor_reg{tensor_reg}, _tensor_mgr{new TensorManager(tensor_reg, optimizer->getVarCount())},
    _optimizer{optimizer}
{
  /* empty */
}

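// Records the operand's info and builds its forward tensor immediately:
// constant operands become TrainableTensors, all others become plain Tensors.
// Dynamic operands are not supported on this backend.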
void TensorBuilder::registerTensorInfo(const ir::OperandIndex &index, const ir::OperandInfo &info)
{
  _tensor_info_map.emplace(index, info);
  _as_constants[index] = info.isConstant();

  assert(!info.isDynamic());

  // NOTE For now, whether an operand is built as a trainable tensor depends on
  //      whether the corresponding operand is constant.
  if (_as_constants[index])
  {
    auto tensor = std::make_unique<TrainableTensor>(info);
    _tensor_reg->setTrainableTensor(index, std::move(tensor));
  }
  else
  {
    auto tensor = std::make_unique<Tensor>(info);
    _tensor_reg->setNonConstTensor(index, std::move(tensor));
  }
}

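// Records info for a tensor used only in the backward pass. Constant
// (trainable) operands get a GradientTensor plus one tensor per optimizer
// variable; non-constant operands get a BackPropTensor.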
void TensorBuilder::registerBackwardTensorInfo(const ir::OperandIndex &index,
                                               const ir::OperandInfo &info)
{
  _backward_tensor_info_map.emplace(index, info);

  assert(!info.isDynamic());

  // NOTE For now, whether an operand is built as a trainable tensor depends on
  //      whether the corresponding operand is constant.
  assert(_as_constants[index] == info.isConstant());
  if (_as_constants[index])
  {
    auto tensor = std::make_unique<GradientTensor>(info);
    _tensor_reg->setGradientTensor(index, std::move(tensor));

    // Initialize tensors for optimizer variables
    for (uint32_t i = 0; i < _optimizer->getVarCount(); ++i)
    {
      auto tensor = std::make_unique<Tensor>(info);
      _tensor_reg->getTrainableTensor(index)->appendOptVar(std::move(tensor));
    }
  }
  else
  {
    auto tensor = std::make_unique<BackPropTensor>(info);
    _tensor_reg->setBackPropTensor(index, std::move(tensor));
  }
}

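// Registers a BackPropTensor that can be disposed of once its last use in the
// backward pass has passed; only non-constant operands qualify.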
void TensorBuilder::registerDisposableBackwardTensorInfo(const DisposableTensorIndex &index,
                                                         const ir::OperandInfo &info)
{
  assert(!info.isDynamic());
  assert(!_as_constants[index.operand_index()]);

  auto disposable_tensor = std::make_unique<BackPropTensor>(info);
  _tensor_reg->setDisposableBackPropTensor(index, std::move(disposable_tensor));

  _disposable_backprops.add(index);
}

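// Associates a layer-scope tensor with the operation it belongs to, keeping a
// per-operation set of registered LayerScopeTensorIndex values.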
void TensorBuilder::registerLayerScopeTensor(const LayerScopeTensorIndex &index,
                                             std::shared_ptr<LayerScopeTensor> &tensor)
{
  const auto op_idx = index.op_index();

  const auto pair = _operation_to_layerscope.find(op_idx);
  if (pair == _operation_to_layerscope.end())
  {
    util::Set<LayerScopeTensorIndex> tensor_indices;
    tensor_indices.add(index);
    _operation_to_layerscope[op_idx] = tensor_indices;
  }
  else
  {
    assert(!pair->second.contains(index));
    pair->second.add(index);
  }

  _tensor_reg->setLayerScopeTensor(index, tensor);
}

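// Claims a memory plan for the operand on its first use, choosing the
// trainable or non-const planner based on constness.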
void TensorBuilder::notifyFirstUse(const ir::OperandIndex &index)
{
  // TODO Support memory plan
  if (_as_constants[index])
  {
    _tensor_mgr->claimTrainablePlan(index);
  }
  else
  {
    _tensor_mgr->claimNonConstPlan(index);
  }
}

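// Releases the operand's memory plan after its last use.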
void TensorBuilder::notifyLastUse(const ir::OperandIndex &index)
{
  if (_as_constants[index])
  {
    _tensor_mgr->releaseTrainablePlan(index);
  }
  else
  {
    _tensor_mgr->releaseNonConstPlan(index);
  }
}

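// Claims a backward-pass memory plan: a gradient plan for constant (trainable)
// operands, a back-propagation plan otherwise.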
void TensorBuilder::notifyBackwardFirstUse(const ir::OperandIndex &index)
{
  // TODO Support memory plan
  if (_as_constants[index])
  {
    _tensor_mgr->claimGradientPlan(index);
  }
  else
  {
    _tensor_mgr->claimBackPropPlan(index);
  }
}

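// Releases the backward-pass memory plan claimed above.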
void TensorBuilder::notifyBackwardLastUse(const ir::OperandIndex &index)
{
  if (_as_constants[index])
  {
    _tensor_mgr->releaseGradientPlan(index);
  }
  else
  {
    _tensor_mgr->releaseBackPropPlan(index);
  }
}

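// First/last-use notifications for disposable back-prop tensors; these map
// directly onto the TensorManager's disposable back-prop plans.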
void TensorBuilder::notifyDisposableBackPropFirstUse(const DisposableTensorIndex &index)
{
  _tensor_mgr->claimDisposableBackPropPlan(index);
}

void TensorBuilder::notifyDisposableBackPropLastUse(const DisposableTensorIndex &index)
{
  _tensor_mgr->releaseDisposableBackPropPlan(index);
}

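// First/last-use notifications for layer-scope tensors.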
void TensorBuilder::notifyLayerScopeFirstUse(const LayerScopeTensorIndex &index)
{
  _tensor_mgr->claimLayerScopePlan(index);
}

void TensorBuilder::notifyLayerScopeLastUse(const LayerScopeTensorIndex &index)
{
  _tensor_mgr->releaseLayerScopePlan(index);
}

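// Queries for whether a tensor has been registered with this builder, one per
// tensor kind: forward, backward, disposable back-prop, and layer-scope.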
bool TensorBuilder::isRegistered(const ir::OperandIndex &index) const
{
  return _tensor_info_map.find(index) != _tensor_info_map.end();
}

bool TensorBuilder::isRegisteredBackward(const ir::OperandIndex &index) const
{
  return _backward_tensor_info_map.find(index) != _backward_tensor_info_map.end();
}

bool TensorBuilder::isRegisteredDisposableBackwardTensor(const DisposableTensorIndex &index) const
{
  return _disposable_backprops.contains(index);
}

bool TensorBuilder::isRegisteredLayerScopeTensor(const ir::OperationIndex &index) const
{
  const auto pair = _operation_to_layerscope.find(index);
  return (pair != _operation_to_layerscope.end());
}

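// Accessors for layer-scope bookkeeping: the tensor indices registered for an
// operation, and a tensor's lifetime as stored on the tensor itself.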
const util::Set<LayerScopeTensorIndex> &
TensorBuilder::getRegisteredLayerScopeTensorIndices(const ir::OperationIndex &index) const
{
  const auto pair = _operation_to_layerscope.find(index);
  assert(pair != _operation_to_layerscope.end());

  return pair->second;
}

LayerScopeTensorLifeTime
TensorBuilder::getLayerScopeTensorLifeTime(const LayerScopeTensorIndex &index) const
{
  const auto &ls_tensors = _tensor_reg->layerscope_tensors();
  const auto &tensor = ls_tensors.at(index);
  return tensor->lifetime();
}

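// Allocation entry points: forward tensors (non-const and trainable),
// backward tensors (back-prop, gradient, disposable back-prop), and
// layer-scope tensors are allocated in separate phases.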
void TensorBuilder::allocate(void)
{
  _tensor_mgr->allocateNonConstTensors();
  _tensor_mgr->allocateTrainableTensors();
}

void TensorBuilder::allocateBackward(void)
{
  _tensor_mgr->allocateBackPropTensors();
  _tensor_mgr->allocateGradientTensors();
  _tensor_mgr->allocateDisposableBackPropTensors();
}

void TensorBuilder::allocateLayerScope(void) { _tensor_mgr->allocateLayerScopeTensors(); }
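
// Illustrative call order (a sketch with hypothetical `index` and `info`
// values, not a prescribed contract): tensors are registered first,
// first/last-use notifications drive the memory plans, and allocation
// happens once per phase.
//
//   TensorBuilder builder{tensor_reg, optimizer};
//   builder.registerTensorInfo(index, info);         // forward tensor
//   builder.notifyFirstUse(index);                   // claim memory plan
//   builder.notifyLastUse(index);                    // release memory plan
//   builder.allocate();                              // forward allocation
//
//   builder.registerBackwardTensorInfo(index, info); // gradient/back-prop
//   builder.notifyBackwardFirstUse(index);
//   builder.notifyBackwardLastUse(index);
//   builder.allocateBackward();                      // backward allocation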

} // namespace train
} // namespace backend
} // namespace onert