TensorBuilder::TensorBuilder(const std::shared_ptr<TensorRegistry> &tensor_reg,
                             const exec::train::optimizer::Optimizer *optimizer)
  : _tensor_reg{tensor_reg}, _tensor_mgr{new TensorManager(tensor_reg, optimizer->getVarCount())},
    _optimizer{optimizer}
{
  // DO NOTHING
}

void TensorBuilder::registerTensorInfo(const ir::OperandIndex &index, const ir::OperandInfo &info)
{
  _tensor_info_map.emplace(index, info);
  _as_constants[index] = info.isConstant();
  assert(!info.isDynamic());

  // Constant operands are built as trainable tensors, the rest as plain non-const tensors.
  if (_as_constants[index])
  {
    auto tensor = std::make_unique<TrainableTensor>(info);
    _tensor_reg->setTrainableTensor(index, std::move(tensor));
  }
  else
  {
    auto tensor = std::make_unique<Tensor>(info);
    _tensor_reg->setNonConstTensor(index, std::move(tensor));
  }
}

void TensorBuilder::registerBackwardTensorInfo(const ir::OperandIndex &index,
                                               const ir::OperandInfo &info)
{
  _backward_tensor_info_map.emplace(index, info);
  assert(!info.isDynamic());
  assert(_as_constants[index] == info.isConstant());

  // Constant (trainable) operands get a gradient tensor plus one tensor per optimizer
  // variable; all other operands get a back-propagation tensor.
  if (_as_constants[index])
  {
    auto tensor = std::make_unique<GradientTensor>(info);
    _tensor_reg->setGradientTensor(index, std::move(tensor));

    for (uint32_t i = 0; i < _optimizer->getVarCount(); ++i)
    {
      auto tensor = std::make_unique<Tensor>(info);
      _tensor_reg->getTrainableTensor(index)->appendOptVar(std::move(tensor));
    }
  }
  else
  {
    auto tensor = std::make_unique<BackPropTensor>(info);
    _tensor_reg->setBackPropTensor(index, std::move(tensor));
  }
}

void TensorBuilder::registerDisposableBackwardTensorInfo(const DisposableTensorIndex &index,
                                                         const ir::OperandInfo &info)
{
  assert(!info.isDynamic());
  assert(!_as_constants[index.operand_index()]);

  auto disposable_tensor = std::make_unique<BackPropTensor>(info);
  _tensor_reg->setDisposableBackPropTensor(index, std::move(disposable_tensor));
  _disposable_backprops.add(index);
}

void TensorBuilder::registerLayerScopeTensor(const LayerScopeTensorIndex &index,
                                             std::shared_ptr<LayerScopeTensor> &tensor)
{
  const auto op_idx = index.op_index();
  const auto pair = _operation_to_layerscope.find(op_idx);
  if (pair == _operation_to_layerscope.end())
  {
    util::Set<LayerScopeTensorIndex> tensor_indices;
    tensor_indices.add(index);
    _operation_to_layerscope[op_idx] = tensor_indices;
  }
  else
  {
    assert(!pair->second.contains(index));
    pair->second.add(index);
  }
  _tensor_reg->setLayerScopeTensor(index, tensor);
}

void TensorBuilder::notifyFirstUse(const ir::OperandIndex &index)
{
  if (_as_constants[index])
    _tensor_mgr->claimTrainablePlan(index);
  else
    _tensor_mgr->claimNonConstPlan(index);
}

void TensorBuilder::notifyLastUse(const ir::OperandIndex &index)
{
  if (_as_constants[index])
    _tensor_mgr->releaseTrainablePlan(index);
  else
    _tensor_mgr->releaseNonConstPlan(index);
}

void TensorBuilder::notifyBackwardFirstUse(const ir::OperandIndex &index)
{
  if (_as_constants[index])
    _tensor_mgr->claimGradientPlan(index);
  else
    _tensor_mgr->claimBackPropPlan(index);
}

void TensorBuilder::notifyBackwardLastUse(const ir::OperandIndex &index)
{
  if (_as_constants[index])
    _tensor_mgr->releaseGradientPlan(index);
  else
    _tensor_mgr->releaseBackPropPlan(index);
}

void TensorBuilder::notifyDisposableBackPropFirstUse(const DisposableTensorIndex &index)
{
  _tensor_mgr->claimDisposableBackPropPlan(index);
}

void TensorBuilder::notifyDisposableBackPropLastUse(const DisposableTensorIndex &index)
{
  _tensor_mgr->releaseDisposableBackPropPlan(index);
}

void TensorBuilder::notifyLayerScopeFirstUse(const LayerScopeTensorIndex &index)
{
  _tensor_mgr->claimLayerScopePlan(index);
}

void TensorBuilder::notifyLayerScopeLastUse(const LayerScopeTensorIndex &index)
{
  _tensor_mgr->releaseLayerScopePlan(index);
}

bool TensorBuilder::isRegistered(const ir::OperandIndex &index) const
{
  return _tensor_info_map.find(index) != _tensor_info_map.end();
}

bool TensorBuilder::isRegisteredBackward(const ir::OperandIndex &index) const
{
  return _backward_tensor_info_map.find(index) != _backward_tensor_info_map.end();
}

bool TensorBuilder::isRegisteredDisposableBackwardTensor(const DisposableTensorIndex &index) const
{
  return _disposable_backprops.contains(index);
}

bool TensorBuilder::isRegisteredLayerScopeTensor(const ir::OperationIndex &index) const
{
  const auto pair = _operation_to_layerscope.find(index);
  return (pair != _operation_to_layerscope.end());
}

const util::Set<LayerScopeTensorIndex> &
TensorBuilder::getRegisteredLayerScopeTensorIndices(const ir::OperationIndex &index) const
{
  const auto pair = _operation_to_layerscope.find(index);
  assert(pair != _operation_to_layerscope.end());
  return pair->second;
}

LayerScopeTensorLifeTime
TensorBuilder::getLayerScopeTensorLifeTime(const LayerScopeTensorIndex &index) const
{
  const auto &ls_tensors = _tensor_reg->layerscope_tensors();
  const auto &tensor = ls_tensors.at(index);
  return tensor->lifetime();
}

void TensorBuilder::allocate(void)
{
  _tensor_mgr->allocateNonConstTensors();
  _tensor_mgr->allocateTrainableTensors();
}

void TensorBuilder::allocateBackward(void)
{
  _tensor_mgr->allocateBackPropTensors();
  _tensor_mgr->allocateGradientTensors();
  _tensor_mgr->allocateDisposableBackPropTensors();
}
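
The register*, notify*, and allocate* members above follow a claim/release lifetime-planning pattern: tensors are registered first, their first and last uses are announced so the TensorManager can plan memory, and allocation happens last. The following is a minimal usage sketch for a single operand, not the actual backend driver; `tensor_reg`, `optimizer`, `index`, and `info` are assumed to be supplied by the surrounding train backend code.

// Illustrative sketch only -- assumes the surrounding backend provides:
//   std::shared_ptr<TensorRegistry> tensor_reg;
//   const exec::train::optimizer::Optimizer *optimizer;
//   ir::OperandIndex index;  ir::OperandInfo info;
TensorBuilder tensor_builder{tensor_reg, optimizer};

// Forward tensors: register, plan the lifetime, then allocate.
tensor_builder.registerTensorInfo(index, info);
assert(tensor_builder.isRegistered(index));
tensor_builder.notifyFirstUse(index); // claim a memory plan for the operand
tensor_builder.notifyLastUse(index);  // release the plan once the last use is scheduled
tensor_builder.allocate();

// Backward (gradient / back-propagation) tensors follow the same pattern.
tensor_builder.registerBackwardTensorInfo(index, info);
tensor_builder.notifyBackwardFirstUse(index);
tensor_builder.notifyBackwardLastUse(index);
tensor_builder.allocateBackward();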
Class that is the index of a DisposableTensor.
void notifyDisposableBackPropFirstUse(const DisposableTensorIndex &)
void registerLayerScopeTensor(const LayerScopeTensorIndex &index, std::shared_ptr< LayerScopeTensor > &info)
void notifyDisposableBackPropLastUse(const DisposableTensorIndex &)
bool isRegistered(const ir::OperandIndex &) const
void notifyLayerScopeLastUse(const LayerScopeTensorIndex &)
void notifyBackwardFirstUse(const ir::OperandIndex &)
void notifyLayerScopeFirstUse(const LayerScopeTensorIndex &)
void registerDisposableBackwardTensorInfo(const DisposableTensorIndex &index, const ir::OperandInfo &info)
void notifyBackwardLastUse(const ir::OperandIndex &)
bool isRegisteredDisposableBackwardTensor(const DisposableTensorIndex &index) const
const util::Set< LayerScopeTensorIndex > & getRegisteredLayerScopeTensorIndices(const ir::OperationIndex &) const
void allocateBackward(void)
void notifyLastUse(const ir::OperandIndex &)
bool isRegisteredLayerScopeTensor(const ir::OperationIndex &) const
void allocateLayerScope(void)
void notifyFirstUse(const ir::OperandIndex &)
void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info)
Register tensor information to be allocated on the train backend.
bool isRegisteredBackward(const ir::OperandIndex &) const
LayerScopeTensorLifeTime getLayerScopeTensorLifeTime(const LayerScopeTensorIndex &) const
void registerBackwardTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info)
Register information for tensors used only in the backward pass, to be allocated on the train backend.
TensorBuilder(const std::shared_ptr< TensorRegistry > &tensor_reg, const exec::train::optimizer::Optimizer *optimizer)
Base class for all optimizers.
virtual uint32_t getVarCount() const =0
Get the number of optimizer variables.
Class to save a tensor's shape and type.
Class for a set of custom elements.
void add(const Element &e)
Add a given element to the set.
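
Only the `add` and `contains` members of `util::Set` are used in this file; a tiny illustration of those two calls follows, with the element type chosen arbitrarily for the example.

// Minimal illustration of the util::Set members referenced above; element type is arbitrary.
util::Set<int> indices;
indices.add(42);               // "Add a given element to the set."
assert(indices.contains(42));  // contains() is the query behind isRegisteredDisposableBackwardTensor()
assert(!indices.contains(7));  // an element that was never added is not found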