{
  // Run the cond subgraph with "_input_tensors" first. While its single boolean
  // output stays true: run the body subgraph (feeding "_input_tensors" on the
  // first iteration and the previous body outputs afterwards), copy the body
  // outputs into "_output_tensors", and re-evaluate the condition. If the body
  // never runs, "_input_tensors" are copied straight into "_output_tensors".
  auto cond_exec = _executors->at(_model_index, _cond_subg_index);
  auto body_exec = _executors->at(_model_index, _body_subg_index);

  // Need a temp tensor to hold the cond subgraph's single output
  assert(cond_exec->outputSize() == 1);
  auto cond_output_tensor = [&]() {
    auto tensor = std::make_unique<Tensor>(cond_exec->outputInfo(0), _dyn_memory_manager);
    tensor->set_dynamic();
    tensor->setBuffer(_dyn_memory_manager->allocate(tensor.get(), tensor->total_size()));
    return tensor;
  }();

  VERBOSE(While) << "Call to $" << _cond_subg_index << " (cond)" << std::endl;
  const auto &options = _executors->entryExecutor()->currentOptions();
  cond_exec->execute(_input_tensors, {cond_output_tensor.get()}, options);
  VERBOSE(While) << "Return from $" << _cond_subg_index << std::endl;
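
  // Helper: read the boolean condition result out of the cond output tensor's buffer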
  auto getResultCond = [](backend::ITensor *tensor) -> bool {
    bool ret = false;
    tensor->access([&](ITensor &tensor) { ret = *reinterpret_cast<bool *>(tensor.buffer()); });
    return ret;
  };

  std::vector<ITensor *> op_inputs(_input_tensors.begin(), _input_tensors.end());
  std::vector<ITensor *> op_outputs(_output_tensors.begin(), _output_tensors.end());
  std::vector<ir::PermuteType> permute_types;
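  // This op never changes layout, so every output permutation is a plain copy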
  for (uint32_t i = 0; i < op_outputs.size(); i++)
    permute_types.emplace_back(ir::PermuteType::COPY);

  // If the condition is false up front, the body never runs: copy the op inputs
  // straight to the op outputs and return
  if (!getResultCond(cond_output_tensor.get()))
  {
    PermuteLayer copy_body_inputs_to_op_outputs{op_inputs, op_outputs, permute_types,
                                                _external_context};
    copy_body_inputs_to_op_outputs.run();
    return;
  }

  // Need temp tensors to hold the body subgraph outputs
  std::vector<std::unique_ptr<Tensor>> temp_outputs_o; // keeps ownership of the temp tensors
  std::vector<IPortableTensor *> temp_outputs;
  for (uint32_t i = 0; i < body_exec->outputSize(); i++)
  {
    auto tensor = std::make_unique<Tensor>(body_exec->outputInfo(i), _dyn_memory_manager);
    tensor->set_dynamic();
    tensor->setBuffer(_dyn_memory_manager->allocate(tensor.get(), tensor->total_size()));
    temp_outputs.push_back(tensor.get());
    temp_outputs_o.push_back(std::move(tensor));
  }
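
  // After each iteration, this PermuteLayer copies the body outputs into the op outputs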
  std::vector<ITensor *> body_outputs(temp_outputs.begin(), temp_outputs.end());
  PermuteLayer copy_body_outputs_to_op_outputs{body_outputs, op_outputs, permute_types,
                                               _external_context};
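
  // The first body run consumes the op inputs ("_input_tensors"); later runs read
  // the previous iteration's results back from "_output_tensors"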
  const auto body_execute_with_op_inputs = [&]() {
    VERBOSE(While) << "Call to $" << _body_subg_index << " (body)" << std::endl;
    body_exec->execute(_input_tensors, temp_outputs, options);
    VERBOSE(While) << "Return from $" << _body_subg_index << std::endl;
  };

  const auto body_execute_with_body_outputs = [&]() {
    VERBOSE(While) << "Call to $" << _body_subg_index << " (body)" << std::endl;
    body_exec->execute(_output_tensors, temp_outputs, options);
    VERBOSE(While) << "Return from $" << _body_subg_index << std::endl;
  };

  std::function<void()> body_execute = body_execute_with_op_inputs;
  const auto cond_execute = [&]() {
    VERBOSE(While) << "Call to $" << _cond_subg_index << " (cond)" << std::endl;
    cond_exec->execute(_output_tensors, {cond_output_tensor.get()}, options);
    VERBOSE(While) << "Return from $" << _cond_subg_index << std::endl;
  };

  // Loop while the cond subgraph keeps returning true
  while (getResultCond(cond_output_tensor.get()))
  {
    body_execute();
    copy_body_outputs_to_op_outputs.run();
    cond_execute();
    // After the first pass, the body reads its inputs back from the op outputs
    body_execute = body_execute_with_body_outputs;
  }

  // Clean up the temp tensors
  _dyn_memory_manager->deallocate(cond_output_tensor.get());
  for (auto &&tensor : temp_outputs)
  {
    _dyn_memory_manager->deallocate(tensor);
  }
}