{
  // Overall flow:
  //   1. Run the cond subgraph on "_input_tensors".
  //   2. While its boolean output is true, run the body subgraph, copy the
  //      body outputs into "_output_tensors", and re-run the cond subgraph
  //      on those outputs.
  //   3. If the body never runs, copy "_input_tensors" straight into
  //      "_output_tensors".
  auto cond_exec = _executors->at(_model_index, _cond_subg_index);
  auto body_exec = _executors->at(_model_index, _body_subg_index);

  // Need a temp tensor to hold the cond subgraph's single output
  assert(cond_exec->outputSize() == 1);
  auto cond_output_tensor = [&]() {
    auto tensor = std::make_unique<Tensor>(cond_exec->outputInfo(0), _dyn_memory_manager);
    tensor->set_dynamic();
    tensor->setBuffer(_dyn_memory_manager->allocate(tensor.get(), tensor->total_size()));
    return tensor;
  }();

  VERBOSE(While) << "Call to $" << _cond_subg_index << " (cond)" << std::endl;
  const auto &options = _executors->entryExecutor()->currentOptions();
  cond_exec->execute(_input_tensors, {cond_output_tensor.get()}, options);
  VERBOSE(While) << "Return from $" << _cond_subg_index << std::endl;

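  // Helper that reads the boolean result back out of the cond subgraph's
  // output tensor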
  auto getResultCond = [](backend::ITensor *tensor) -> bool {
    bool ret = false;
    tensor->access([&](ITensor &tensor) { ret = *reinterpret_cast<bool *>(tensor.buffer()); });
    return ret;
  };

  std::vector<ITensor *> op_inputs(_input_tensors.begin(), _input_tensors.end());
  std::vector<ITensor *> op_outputs(_output_tensors.begin(), _output_tensors.end());
  std::vector<ir::PermuteType> permute_types;
  // No layout change is involved here, so every permutation is a plain copy
  for (uint32_t i = 0; i < op_outputs.size(); i++)
    permute_types.emplace_back(ir::PermuteType::COPY);
  // When the condition is false up front, the body never runs; copy the op
  // inputs straight to the op outputs and finish.
  if (!getResultCond(cond_output_tensor.get()))
  {
    PermuteLayer copy_body_inputs_to_op_outputs{op_inputs, op_outputs, permute_types,
                                                _external_context};
    copy_body_inputs_to_op_outputs.run();
    return;
  }
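
  // (With all-COPY permute types, the PermuteLayer degenerates to a plain
  // element-for-element copy from each input tensor to its output tensor.)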

  // Need some temp tensors to hold the body subgraph outputs
  std::vector<std::unique_ptr<Tensor>> temp_outputs_o;
  std::vector<IPortableTensor *> temp_outputs;
  for (uint32_t i = 0; i < body_exec->outputSize(); i++)
  {
    auto tensor = std::make_unique<Tensor>(body_exec->outputInfo(i), _dyn_memory_manager);
    tensor->set_dynamic();
    tensor->setBuffer(_dyn_memory_manager->allocate(tensor.get(), tensor->total_size()));
    temp_outputs.push_back(tensor.get());
    temp_outputs_o.push_back(std::move(tensor));
  }

  std::vector<ITensor *> body_outputs(temp_outputs.begin(), temp_outputs.end());
  PermuteLayer copy_body_outputs_to_op_outputs{body_outputs, op_outputs, permute_types,
                                               _external_context};

  const auto body_execute_with_op_inputs = [&]() {
    VERBOSE(While) << "Call to $" << _body_subg_index << " (body)" << std::endl;
    body_exec->execute(_input_tensors, temp_outputs, options);
    VERBOSE(While) << "Return from $" << _body_subg_index << std::endl;
  };

  const auto body_execute_with_body_outputs = [&]() {
    VERBOSE(While) << "Call to $" << _body_subg_index << " (body)" << std::endl;
    body_exec->execute(_output_tensors, temp_outputs, options);
    VERBOSE(While) << "Return from $" << _body_subg_index << std::endl;
  };
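
  // The first iteration feeds the op inputs to the body; every later
  // iteration feeds back the op outputs written by the previous iteration.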
  std::function<void()> body_execute = body_execute_with_op_inputs;
  const auto cond_execute = [&]() {
    VERBOSE(While) << "Call to $" << _cond_subg_index << " (cond)" << std::endl;
    cond_exec->execute(_output_tensors, {cond_output_tensor.get()}, options);
    VERBOSE(While) << "Return from $" << _cond_subg_index << std::endl;
  };

  // Loop while the cond subgraph's output is true
  while (getResultCond(cond_output_tensor.get()))
  {
    body_execute();
    copy_body_outputs_to_op_outputs.run();
    cond_execute();
    body_execute = body_execute_with_body_outputs;
  }
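
  // At this point "_output_tensors" already holds the last body iteration's
  // results, so no final copy is needed.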

  // Clean up the temp tensors
  _dyn_memory_manager->deallocate(cond_output_tensor.get());
  for (auto &&tensor : temp_outputs)
  {
    _dyn_memory_manager->deallocate(tensor);
  }
}