26 : _tensor_reg{tensor_reg}, _tensor_mgr{new
TensorManager(tensor_reg, optimizer->getVarCount())},
34 _tensor_info_map.emplace(index,
info);
35 _as_constants[index] =
info.isConstant();
37 assert(!
info.isDynamic());
41 if (_as_constants[index])
43 auto tensor = std::make_unique<TrainableTensor>(
info);
44 _tensor_reg->setTrainableTensor(index, std::move(tensor));
48 auto tensor = std::make_unique<Tensor>(
info);
49 _tensor_reg->setNonConstTensor(index, std::move(tensor));
56 _backward_tensor_info_map.emplace(index,
info);
58 assert(!
info.isDynamic());
62 assert(_as_constants[index] ==
info.isConstant());
63 if (_as_constants[index])
65 auto tensor = std::make_unique<GradientTensor>(
info);
66 _tensor_reg->setGradientTensor(index, std::move(tensor));
69 for (uint32_t i = 0; i < _optimizer->
getVarCount(); ++i)
71 auto tensor = std::make_unique<Tensor>(
info);
72 _tensor_reg->getTrainableTensor(index)->appendOptVar(std::move(tensor));
77 auto tensor = std::make_unique<BackPropTensor>(
info);
78 _tensor_reg->setBackPropTensor(index, std::move(tensor));
85 assert(!
info.isDynamic());
86 assert(!_as_constants[index.operand_index()]);
88 auto disposable_tensor = std::make_unique<BackPropTensor>(
info);
89 _tensor_reg->setDisposableBackPropTensor(index, std::move(disposable_tensor));
91 _disposable_backprops.add(index);
95 std::shared_ptr<LayerScopeTensor> &tensor)
97 const auto op_idx = index.op_index();
99 const auto pair = _operation_to_layerscope.find(op_idx);
100 if (pair == _operation_to_layerscope.end())
103 tensor_indices.
add(index);
104 _operation_to_layerscope[op_idx] = tensor_indices;
108 assert(!pair->second.contains(index));
109 pair->second.add(index);
112 _tensor_reg->setLayerScopeTensor(index, tensor);
118 if (_as_constants[index])
120 _tensor_mgr->claimTrainablePlan(index);
124 _tensor_mgr->claimNonConstPlan(index);
130 if (_as_constants[index])
132 _tensor_mgr->releaseTrainablePlan(index);
136 _tensor_mgr->releaseNonConstPlan(index);
143 if (_as_constants[index])
145 _tensor_mgr->claimGradientPlan(index);
149 _tensor_mgr->claimBackPropPlan(index);
155 if (_as_constants[index])
157 _tensor_mgr->releaseGradientPlan(index);
161 _tensor_mgr->releaseBackPropPlan(index);
167 _tensor_mgr->claimDisposableBackPropPlan(index);
172 _tensor_mgr->releaseDisposableBackPropPlan(index);
177 _tensor_mgr->claimLayerScopePlan(index);
182 _tensor_mgr->releaseLayerScopePlan(index);
187 return _tensor_info_map.find(index) != _tensor_info_map.end();
192 return _backward_tensor_info_map.find(index) != _backward_tensor_info_map.end();
197 return _disposable_backprops.contains(index);
202 const auto pair = _operation_to_layerscope.find(index);
203 return (pair != _operation_to_layerscope.end());
209 const auto pair = _operation_to_layerscope.find(index);
210 assert(pair != _operation_to_layerscope.end());
218 const auto &ls_tensors = _tensor_reg->layerscope_tensors();
219 const auto &tensor = ls_tensors.at(index);
220 return tensor->lifetime();
225 _tensor_mgr->allocateNonConstTensors();
226 _tensor_mgr->allocateTrainableTensors();
231 _tensor_mgr->allocateBackPropTensors();
232 _tensor_mgr->allocateGradientTensors();
233 _tensor_mgr->allocateDisposableBackPropTensors();
Class representing the index of a DisposableTensor.
void notifyDisposableBackPropFirstUse(const DisposableTensorIndex &)
void registerLayerScopeTensor(const LayerScopeTensorIndex &index, std::shared_ptr< LayerScopeTensor > &info)
void notifyDisposableBackPropLastUse(const DisposableTensorIndex &)
bool isRegistered(const ir::OperandIndex &) const
void notifyLayerScopeLastUse(const LayerScopeTensorIndex &)
void notifyBackwardFirstUse(const ir::OperandIndex &)
void notifyLayerScopeFirstUse(const LayerScopeTensorIndex &)
void registerDisposableBackwardTensorInfo(const DisposableTensorIndex &index, const ir::OperandInfo &info)
void notifyBackwardLastUse(const ir::OperandIndex &)
bool isRegisteredDisposableBackwardTensor(const DisposableTensorIndex &index) const
const util::Set< LayerScopeTensorIndex > & getRegisteredLayerScopeTensorIndices(const ir::OperationIndex &) const
void allocateBackward(void)
void notifyLastUse(const ir::OperandIndex &)
bool isRegisteredLayerScopeTensor(const ir::OperationIndex &) const
void allocateLayerScope(void)
void notifyFirstUse(const ir::OperandIndex &)
void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info)
Register tensor information to allocate on the train backend.
bool isRegisteredBackward(const ir::OperandIndex &) const
LayerScopeTensorLifeTime getLayerScopeTensorLifeTime(const LayerScopeTensorIndex &) const
void registerBackwardTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info)
Register information for tensors used only in the backward pass, to allocate on the train backend.
TensorBuilder(const std::shared_ptr< TensorRegistry > &tensor_reg, const exec::train::optimizer::Optimizer *optimizer)
Base class for all optimizers.
virtual uint32_t getVarCount() const =0
Get the number of optimizer variables.
Class to save tensor's shape and type.
Class for a set of custom elements.
void add(const Element &e)
Add a given element to the set.
volatile const char info[]