// Builds the tensor allocation/deallocation plan for `graph`.
// A Lifetime is a pair (first kernel index that defines the tensor,
// last kernel index that uses it), inclusive, in execution order.
51 using Lifetime = std::pair<size_t, size_t>;
// Maps each tensor to its computed lifetime interval.
52 std::unordered_map<Tensor *, Lifetime> lifetimes;
53 const size_t num_kernels = graph._kernels.size();
// Pass 1: walk kernels in execution order, recording when each tensor
// is produced and extending its last-use index on every read.
54 for (
size_t index = 0; index < num_kernels; ++index)
56 const auto &kernel = graph._kernels[index];
// Inputs: extend the lifetime of any already-registered tensor to this
// kernel. Tensors absent from the map (presumably graph inputs or
// constants not owned by the plan — TODO confirm) are skipped by the
// count() guard below.
57 for (
const Tensor *tensor : kernel->getInputTensors())
// const_cast needed because the map key is non-const Tensor* while
// getInputTensors() yields const pointers; the tensor itself is not
// mutated here.
59 auto nc_tensor =
const_cast<Tensor *
>(tensor);
60 if (lifetimes.count(nc_tensor) > 0)
61 lifetimes.at(nc_tensor).second = index;
// Outputs: each tensor must be produced exactly once; register a
// degenerate lifetime [index, index] that later reads will extend.
63 for (
Tensor *tensor : kernel->getOutputTensors())
65 assert(lifetimes.count(tensor) == 0);
66 lifetimes[tensor] = Lifetime(index, index);
// Pass 2: graph outputs must survive past the last kernel, so their
// deallocation point is pushed to slot `num_kernels` ("after all
// kernels"). The count() guard skips outputs not tracked by the plan.
69 for (
const Tensor *tensor : graph.getOutputTensors())
71 auto nc_tensor =
const_cast<Tensor *
>(tensor);
72 if (lifetimes.count(nc_tensor) > 0)
73 lifetimes.at(nc_tensor).second = num_kernels;
// Pass 3: invert the lifetime map into per-kernel lists.
// _dealloc_plan gets one extra slot (num_kernels + 1) to hold the
// graph outputs deallocated after the final kernel.
75 _alloc_plan.assign(num_kernels, std::vector<Tensor *>());
76 _dealloc_plan.assign(num_kernels + 1, std::vector<Tensor *>());
77 for (
const auto &item : lifetimes)
// item.first = tensor; item.second = (alloc index, dealloc index).
79 _alloc_plan[item.second.first].push_back(item.first);
80 _dealloc_plan[item.second.second].push_back(item.first);
// Executes the graph's kernels in order, allocating/deallocating tensors
// per the precomputed plan.
// NOTE(review): this excerpt is heavily elided — the bodies under each
// `event_notifier` check and several surrounding statements (original
// lines 155-158, 160-162, 164-167, 172-180, 182-184, 186-189, 191,
// 193-196) are missing from view. Comments below describe only what is
// visible; verify against the full source.
// Lazily (re)build the allocation plan if it has been invalidated.
153 if (!_tensor_alloc_plan->isValid())
154 _tensor_alloc_plan->build(*
this);
// Pre-run notification hook (body elided) — only fires when a notifier
// is installed.
159 if (event_notifier !=
nullptr)
// Presumably notifies observers about observable input tensors
// (body elided) — TODO confirm.
163 if (input_tensor->is_observable())
// Main execution loop over kernels in plan order.
168 for (
size_t index = 0; index < _kernels.size(); ++index)
170 const auto &kernel = _kernels[index];
// Pre-kernel notification hook (body elided).
171 if (event_notifier !=
nullptr)
// Allocate this kernel's tensors before it runs; the kernel invocation
// itself is elided from this excerpt (original lines 182-184).
181 _tensor_alloc_plan->allocate(index);
// Post-kernel notification hook (body elided).
185 if (event_notifier !=
nullptr)
// Notify observers of each observable output tensor (notification body
// elided).
190 for (
const Tensor *tensor : kernel->getOutputTensors())
192 if (event_notifier !=
nullptr && tensor->is_observable())
// Release tensors whose last use was this kernel.
197 _tensor_alloc_plan->deallocate(index);