ONE - On-device Neural Engine
TensorBuilder.cc
/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TensorBuilder.h"

#include "Tensor.h"

namespace onert::backend::train
{

TensorBuilder::TensorBuilder(const std::shared_ptr<TensorRegistry> &tensor_reg,
                             const exec::train::optimizer::Optimizer *optimizer)
  : _tensor_reg{tensor_reg}, _tensor_mgr{new TensorManager(tensor_reg, optimizer->getVarCount())},
    _optimizer{optimizer}
{
  /* empty */
}
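
// Illustrative sketch (not part of the original file): constructing a
// TensorBuilder. The registry setup and optimizer choice below are
// assumptions; any concrete exec::train::optimizer::Optimizer (e.g. SGD)
// should do, since only getVarCount() is consulted here to size the
// TensorManager's per-variable storage.
//
//   auto tensor_reg = std::make_shared<TensorRegistry>();
//   exec::train::optimizer::SGD sgd; // hypothetical optimizer instance
//   TensorBuilder builder{tensor_reg, &sgd};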

void TensorBuilder::registerTensorInfo(const ir::OperandIndex &index, const ir::OperandInfo &info)
{
  _tensor_info_map.emplace(index, info);
  _as_constants[index] = info.isConstant();

  assert(!info.isDynamic());

  // NOTE For now, whether an operand is built as a trainable tensor depends on
  //      whether the corresponding operand is constant.
  if (_as_constants[index])
  {
    auto tensor = std::make_unique<TrainableTensor>(info);
    _tensor_reg->setTrainableTensor(index, std::move(tensor));
  }
  else
  {
    auto tensor = std::make_unique<Tensor>(info);
    _tensor_reg->setNonConstTensor(index, std::move(tensor));
  }
}
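
// Usage sketch (illustrative; the operand indices and OperandInfo values are
// hypothetical): a constant operand becomes a TrainableTensor, a non-constant
// one a plain Tensor, and a dynamic shape would trip the assert above.
//
//   builder.registerTensorInfo(ir::OperandIndex{0}, weight_info); // constant -> TrainableTensor
//   builder.registerTensorInfo(ir::OperandIndex{1}, act_info);    // variable -> Tensor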

void TensorBuilder::registerBackwardTensorInfo(const ir::OperandIndex &index,
                                               const ir::OperandInfo &info)
{
  _backward_tensor_info_map.emplace(index, info);

  assert(!info.isDynamic());

  // NOTE For now, whether an operand is built as a gradient tensor depends on
  //      whether the corresponding operand is constant.
  assert(_as_constants[index] == info.isConstant());
  if (_as_constants[index])
  {
    auto tensor = std::make_unique<GradientTensor>(info);
    _tensor_reg->setGradientTensor(index, std::move(tensor));

    // Initialize tensors for the optimizer's variables
    for (uint32_t i = 0; i < _optimizer->getVarCount(); ++i)
    {
      auto tensor = std::make_unique<Tensor>(info);
      _tensor_reg->getTrainableTensor(index)->appendOptVar(std::move(tensor));
    }
  }
  else
  {
    auto tensor = std::make_unique<BackPropTensor>(info);
    _tensor_reg->setBackPropTensor(index, std::move(tensor));
  }
}
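
// Usage sketch (illustrative, same hypothetical indices as above): the
// backward info must agree with the forward registration on constness, per
// the assert above. For a constant operand this also appends one variable
// tensor per optimizer slot, e.g. two for an Adam-style optimizer that keeps
// first and second moments.
//
//   builder.registerBackwardTensorInfo(ir::OperandIndex{0}, weight_info); // GradientTensor + opt vars
//   builder.registerBackwardTensorInfo(ir::OperandIndex{1}, act_info);    // BackPropTensor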

void TensorBuilder::registerDisposableBackwardTensorInfo(const DisposableTensorIndex &index,
                                                         const ir::OperandInfo &info)
{
  assert(!info.isDynamic());
  assert(!_as_constants[index.operand_index()]);

  auto disposable_tensor = std::make_unique<BackPropTensor>(info);
  _tensor_reg->setDisposableBackPropTensor(index, std::move(disposable_tensor));

  _disposable_backprops.add(index);
}
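
// Usage sketch (illustrative; the index constructor arguments are
// assumptions): a DisposableTensorIndex couples a back-prop tensor with the
// operation that consumes it, so the buffer can be retired as soon as that
// operation finishes.
//
//   DisposableTensorIndex dsp_index{ir::OperationIndex{3}, ir::OperandIndex{1}};
//   builder.registerDisposableBackwardTensorInfo(dsp_index, act_info);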

void TensorBuilder::registerLayerScopeTensor(const LayerScopeTensorIndex &index,
                                             std::shared_ptr<LayerScopeTensor> &tensor)
{
  const auto op_idx = index.op_index();

  const auto pair = _operation_to_layerscope.find(op_idx);
  if (pair == _operation_to_layerscope.end())
  {
    util::Set<LayerScopeTensorIndex> tensor_indices;
    tensor_indices.add(index);
    _operation_to_layerscope[op_idx] = tensor_indices;
  }
  else
  {
    assert(!pair->second.contains(index));
    pair->second.add(index);
  }

  _tensor_reg->setLayerScopeTensor(index, tensor);
}
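
// Usage sketch (illustrative; the index and tensor construction are
// assumptions): layer-scope tensors are grouped by owning operation, so one
// operation may register several temporaries that share planning.
//
//   LayerScopeTensorIndex ls_index{ir::OperationIndex{3}, 0};
//   auto ls_tensor = std::make_shared<LayerScopeTensor>(/* info, lifetime */);
//   builder.registerLayerScopeTensor(ls_index, ls_tensor);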

void TensorBuilder::notifyFirstUse(const ir::OperandIndex &index)
{
  // TODO Support memory plan
  if (_as_constants[index])
  {
    _tensor_mgr->claimTrainablePlan(index);
  }
  else
  {
    _tensor_mgr->claimNonConstPlan(index);
  }
}

void TensorBuilder::notifyLastUse(const ir::OperandIndex &index)
{
  if (_as_constants[index])
  {
    _tensor_mgr->releaseTrainablePlan(index);
  }
  else
  {
    _tensor_mgr->releaseNonConstPlan(index);
  }
}
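
// Planning sketch (illustrative): first/last-use notifications bracket a
// tensor's live range so the TensorManager can plan buffer reuse before any
// allocation happens.
//
//   builder.notifyFirstUse(ir::OperandIndex{1}); // claim at the first access
//   // ... claims/releases for other operands interleave here ...
//   builder.notifyLastUse(ir::OperandIndex{1});  // release after the last access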

void TensorBuilder::notifyBackwardFirstUse(const ir::OperandIndex &index)
{
  // TODO Support memory plan
  if (_as_constants[index])
  {
    _tensor_mgr->claimGradientPlan(index);
  }
  else
  {
    _tensor_mgr->claimBackPropPlan(index);
  }
}

void TensorBuilder::notifyBackwardLastUse(const ir::OperandIndex &index)
{
  if (_as_constants[index])
  {
    _tensor_mgr->releaseGradientPlan(index);
  }
  else
  {
    _tensor_mgr->releaseBackPropPlan(index);
  }
}

void TensorBuilder::notifyDisposableBackPropFirstUse(const DisposableTensorIndex &index)
{
  _tensor_mgr->claimDisposableBackPropPlan(index);
}

void TensorBuilder::notifyDisposableBackPropLastUse(const DisposableTensorIndex &index)
{
  _tensor_mgr->releaseDisposableBackPropPlan(index);
}

void TensorBuilder::notifyLayerScopeFirstUse(const LayerScopeTensorIndex &index)
{
  _tensor_mgr->claimLayerScopePlan(index);
}

void TensorBuilder::notifyLayerScopeLastUse(const LayerScopeTensorIndex &index)
{
  _tensor_mgr->releaseLayerScopePlan(index);
}

bool TensorBuilder::isRegistered(const ir::OperandIndex &index) const
{
  return _tensor_info_map.find(index) != _tensor_info_map.end();
}

bool TensorBuilder::isRegisteredBackward(const ir::OperandIndex &index) const
{
  return _backward_tensor_info_map.find(index) != _backward_tensor_info_map.end();
}

bool TensorBuilder::isRegisteredDisposableBackwardTensor(const DisposableTensorIndex &index) const
{
  return _disposable_backprops.contains(index);
}

bool TensorBuilder::isRegisteredLayerScopeTensor(const ir::OperationIndex &index) const
{
  const auto pair = _operation_to_layerscope.find(index);
  return (pair != _operation_to_layerscope.end());
}

const util::Set<LayerScopeTensorIndex> &
TensorBuilder::getRegisteredLayerScopeTensorIndices(const ir::OperationIndex &index) const
{
  const auto pair = _operation_to_layerscope.find(index);
  assert(pair != _operation_to_layerscope.end());

  return pair->second;
}

LayerScopeTensorLifeTime
TensorBuilder::getLayerScopeTensorLifeTime(const LayerScopeTensorIndex &index) const
{
  const auto &ls_tensors = _tensor_reg->layerscope_tensors();
  const auto &tensor = ls_tensors.at(index);
  return tensor->lifetime();
}
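
// Query sketch (illustrative; assumes util::Set is range-iterable): after
// registration, the indices grouped under an operation can be enumerated and
// each tensor's lifetime category inspected.
//
//   const ir::OperationIndex op_index{3}; // hypothetical operation
//   if (builder.isRegisteredLayerScopeTensor(op_index))
//   {
//     for (const auto &ls_index : builder.getRegisteredLayerScopeTensorIndices(op_index))
//     {
//       const auto lifetime = builder.getLayerScopeTensorLifeTime(ls_index);
//       // ... branch on LayerScopeTensorLifeTime here
//     }
//   }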

void TensorBuilder::allocate(void)
{
  _tensor_mgr->allocateNonConstTensors();
  _tensor_mgr->allocateTrainableTensors();
}

void TensorBuilder::allocateBackward(void)
{
  _tensor_mgr->allocateBackPropTensors();
  _tensor_mgr->allocateGradientTensors();
  _tensor_mgr->allocateDisposableBackPropTensors();
}

void TensorBuilder::allocateLayerScope(void) { _tensor_mgr->allocateLayerScopeTensors(); }

} // namespace onert::backend::train
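
// End-to-end sketch (illustrative, with hypothetical indices): the expected
// call order is register, then notify first/last uses, then allocate, with
// the forward and backward tensor sets handled in separate passes.
//
//   builder.registerTensorInfo(index, info);          // 1. declare forward tensors
//   builder.notifyFirstUse(index);                    // 2. plan forward live ranges
//   builder.notifyLastUse(index);
//   builder.allocate();                               // 3. materialize forward buffers
//   builder.registerBackwardTensorInfo(index, info);  // 4. same steps for backward
//   builder.notifyBackwardFirstUse(index);
//   builder.notifyBackwardLastUse(index);
//   builder.allocateBackward();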