31 const std::shared_ptr<TensorRegistry> &tensor_reg,
35 shared_memory_operand_indexes)},
36 _shared_memory_operand_indexes{shared_memory_operand_indexes}
42 const std::shared_ptr<TensorRegistry> &tensor_reg,
const std::string planner_id,
45 _static_tensor_mgr{new
StaticTensorManager(_tensor_reg, planner_id, _dynamic_tensor_mgr.get(),
46 shared_memory_operand_indexes)}
53 _tensor_info_map.emplace(ind,
info);
57 _dynamic_tensor_mgr->buildTensor(ind,
info);
61 _static_tensor_mgr->buildTensor(ind,
info,
info.isConstant());
67 assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
68 const auto &tensor_info = _tensor_info_map.at(ind);
70 if (!_tensor_reg->getNativeTensor(ind)->is_dynamic())
72 const auto size = tensor_info.total_size();
73 _static_tensor_mgr->claimPlan(ind,
size);
79 if (!_tensor_reg->getNativeTensor(ind)->is_dynamic())
81 _static_tensor_mgr->releasePlan(ind);
87 return _tensor_info_map.find(ind) != _tensor_info_map.end();
94 return _shared_memory_operand_indexes;
Class to manage dynamic tensors and their memory.
TensorBuilder(const std::shared_ptr< TensorRegistry > &tensor_reg, const ir::OperandIndexMap< ir::OperandIndex > &shared_memory_operand_indexes={})
void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info)
Register tensor information to allocate on the CPU backend.
void notifyLastUse(const ir::OperandIndex &)
const ir::OperandIndexMap< ir::OperandIndex > & getSharedMemoryOperandIndexes() const
void notifyFirstUse(const ir::OperandIndex &)
bool isRegistered(const ir::OperandIndex &) const
Class to save a tensor's shape and type.
volatile const char info[]
std::unordered_map< OperandIndex, T > OperandIndexMap