Performs compilation of the model package using the configured compiler options.
// NOTE(review): this block is a line-numbered doxygen/HTML extraction of a C++
// compile() function (the embedded numbers 98-298 are the original file's line
// numbers). Gaps in that numbering mean statements were lost in extraction, so
// several constructs below are visibly incomplete (truncated lambda headers,
// an unbalanced template bracket). Comments document only what the surviving
// code shows; lost pieces are flagged rather than guessed at.
98{
99
100
101
// --- Option validation ------------------------------------------------------
102 {
// Compilation cannot proceed without a CompilerOptions instance.
103 if (!_options)
104 throw std::runtime_error{"Empty compile option"};
105
106
107
// Profiling-mode constraints (the enclosing condition was lost in extraction;
// presumably "if profiling is enabled" -- TODO confirm against the original).
109 {
// Profiling requires the heterogeneous scheduler to be enabled.
111 throw std::runtime_error("Heterogeneous scheduler must be enabled during profiling.");
112
// Profiling is only supported by the 'Dataflow' executor.
113 if (_options->
executor !=
"Dataflow")
114 throw std::runtime_error("Profiling mode works only with 'Dataflow' executor");
115 }
116
119 }
120
121
// --- Model check: only inference models are compilable ----------------------
122 auto const model_count = _nnpkg->model_count();
123 for (uint16_t i = 0; i < model_count; i++)
124 {
// The condition guarding this throw was lost in extraction; presumably it
// tests whether model i is a training/non-inference model -- TODO confirm.
126 throw std::runtime_error("Compiler can only compile models for inference.");
127 }
128
// --- Per-model options + mandatory IR passes --------------------------------
// NOTE(review): 'model_index' is used below but its derivation (presumably
// ir::ModelIndex{i}) was lost in extraction.
129 std::unordered_map<ir::ModelIndex, CompilerOptions> model_options;
130 for (uint16_t i = 0; i < model_count; i++)
131 {
// Derive a per-model CompilerOptions from the package-level options.
133 model_options[model_index] = optionForSingleModel(model_index);
// Body of a per-subgraph iteration lambda (its header, which binds
// 'graph' and 'subg_index', was lost in extraction -- see the "});" below).
135 auto &subg = nnfw::misc::polymorphic_downcast<ir::Graph &>(graph);
136
137
// Mandatory passes applied to every subgraph before lowering.
138 pass::PassRunner{}
139 .append(std::make_unique<pass::ConstantOutputPass>(subg))
140 .append(std::make_unique<pass::OddOutputPass>(subg))
141 .append(std::make_unique<pass::PermutationIOPass>(subg, model_options[model_index]))
142 .run();
143
144
// Drop operands that are no longer referenced after the passes above.
145 pass::PassRunner{}.append(std::make_unique<pass::UnusedOperandEliminationPass>(subg)).run();
146 });
147 }
148
149
150
151
152
155
156
// Tracing context handed to every generated executor (moved into the
// CompilerArtifact at the end).
157 auto tracing_ctx = std::make_unique<util::TracingCtx>();
158
159
// Copy the package's inter-model edge description before _nnpkg is released.
160 auto model_edges = std::make_unique<ir::ModelEdges>(_nnpkg->model_edges());
161
162
// Collect each model's custom-kernel builder, keyed by model index.
163 std::unordered_map<ir::ModelIndex, std::shared_ptr<backend::custom::IKernelBuilder>>
164 custom_kernel_builders;
165 for (uint16_t i = 0; i < model_count; i++)
166 {
168 custom_kernel_builders[model_index] = _nnpkg->model(model_index)->getKernelBuilder();
169 }
170
171
// --- Lowering: ir::Graph -> compiler::LoweredGraph per (model, subgraph) ----
// NOTE(review): the outer map type was cut by extraction; the trailing '>>>'
// and the double indexing lowered_subgs[model_index][subg_index] below imply
// a map from ir::ModelIndex to this inner subgraph map -- TODO confirm.
173 std::unordered_map<ir::SubgraphIndex, std::unique_ptr<compiler::LoweredGraph>>>
174 lowered_subgs;
175
176 for (uint16_t i = 0; i < model_count; i++)
177 {
179 auto model = _nnpkg->model(model_index);
180
// Body of a per-subgraph iteration lambda (header lost in extraction).
182 auto &subg = nnfw::misc::polymorphic_downcast<ir::Graph &>(graph);
183
// Debug dump of the graph before lowering ('dot_dumper' is declared in a
// portion of the function lost in extraction).
184 dot_dumper.dump(subg,
185 nnfw::misc::str(
"before_lower_model-", i,
"-subg-", subg_index.value()));
186
// Lower the subgraph using its model's per-model options.
187 lowered_subgs[model_index][subg_index] =
188 std::make_unique<compiler::LoweredGraph>(subg, model_options[model_index]);
189
// Register the lowered graph with the tracing context under its
// (model, subgraph) pair.
190 tracing_ctx->setSubgraphIndex(&(lowered_subgs[model_index][subg_index]->
graph()),
191 {model_index, subg_index});
192 });
193 }
194
// The source package is no longer needed once all subgraphs are lowered.
195 _nnpkg.reset();
196
// Debug dump of each lowered graph after lowering.
197 for (const auto &[model_index, model_lsubg] : lowered_subgs)
198 {
199 for (const auto &[subg_index, lowered_subg] : model_lsubg)
200 {
201 dot_dumper.dump(*lowered_subg,
nnfw::misc::str(
"after_lower_model-", model_index.value(),
202 "-subg-", subg_index.value()));
203 }
204 }
205
206
// --- Static shape inference, run per model ----------------------------------
207 for (auto &&pair : lowered_subgs)
208 {
209 auto &model_lsubgs = pair.second;
210
211
212 std::unordered_map<ir::SubgraphIndex, std::unique_ptr<StaticShapeInferer>> inferers =
213 createStaticShapeInferers(model_lsubgs);
214
// Inference is triggered from the primary subgraph only ('primary_subg_idx'
// is defined in an extraction-lost portion of the function).
216 inferers.at(primary_subg_idx)->infer();
217
// Dump inference results for every subgraph's inferer.
218 for (const auto &pair_inferer : inferers)
219 {
220 const auto inferer = pair_inferer.second.get();
221 inferer->dump();
222 }
223 }
224
225
226
227
228
229
230
231
// --- Shape validation over every lowered subgraph ---------------------------
232 for (const auto &pair : lowered_subgs)
233 {
234 const auto &model_lsubgs = pair.second;
235
236 for (const auto &pair_inner : model_lsubgs)
237 {
238 const auto &lowered_subg = pair_inner.second;
// ShapeValidator is a callable object; construct it on the graph and
// invoke it immediately.
239 compiler::ShapeValidator{lowered_subg->graph()}();
240 }
241 }
242
243
244
// --- Executor generation ----------------------------------------------------
// Single-model packages use SingleModelExecutors; multi-model packages use
// MultiModelExecutors, which takes ownership of the inter-model edges.
245 std::shared_ptr<exec::IExecutors> executors = nullptr;
246 const auto &pkg_outputs = model_edges->pkg_outputs;
248 if (model_count == 1)
249 executors = std::make_shared<exec::SingleModelExecutors>();
250 else
251 executors = std::make_shared<exec::MultiModelExecutors>(std::move(model_edges));
252
253 for (auto &&pair : lowered_subgs)
254 {
255 auto const &model_index = pair.first;
256 auto &model_lsubgs = pair.second;
257
258 for (auto &&pair_inner : model_lsubgs)
259 {
260 auto const subg_index = pair_inner.first;
261 auto &lowered_subg = pair_inner.second;
262 auto const indexed_ranks = lowered_subg->indexed_ranks();
263
// Log every operation of the subgraph (the iterate callback that uses
// 'dumper' was lost in extraction).
264 ir::OperationDumper dumper("Executor generation of Subgraph " +
265 std::to_string(subg_index.value()));
266 lowered_subg->graph().operations().iterate(
268
// Assemble the arguments for executor construction.
269 ExecutorFactoryArgs
args;
270 args.tracing_ctx = tracing_ctx.get();
271 args.options = &model_options[model_index];
272 args.model_index = model_index;
273 args.custom_kernel_builder = custom_kernel_builders[model_index];
// Mark package-level outputs produced by this (model, subgraph) pair as
// internal I/O so the executor allocates them itself. The guard condition
// that opened this scope was lost in extraction.
275 {
276 for (const auto &desc : pkg_outputs)
277 {
// C++17 if-with-initializer: destructure desc into (model, subgraph,
// io-index) and match against the current pair.
278
279 if (
const auto &[
m, s, io] = desc;
m == model_index &&
s == subg_index)
280 {
281
282 auto idx = lowered_subg->graph().getOutputs().at(io);
283 args.internal_io_indexes.add(idx);
284 }
285 }
286 }
// Build the executor (the factory call inside the braces, presumably
// ExecutorFactory::get().create(...), was lost in extraction) and
// register it under its (model, subgraph) pair.
287 auto executor = std::unique_ptr<exec::IExecutor>{
290 executors->emplace(model_index, subg_index, std::move(executor));
291 }
292 }
293
294
295
296
// Package the executors and the tracing context as the compilation result.
297 return std::make_unique<CompilerArtifact>(executors, std::move(tracing_ctx));
298}
exec::IExecutor * create(std::unique_ptr< compiler::LoweredGraph > lowered_graph, const std::shared_ptr< exec::IExecutors > &executors, const ExecutorFactoryArgs &args)
static ExecutorFactory & get()
IndexIterator iterate(const Shape &shape)
Create an object of IndexIterator for kernel.
std::string str(Args &&...args)
::onert::util::Index< uint32_t, OperationIndexTag > OperationIndex
::onert::util::Index< uint16_t, ModelIndexTag > ModelIndex
::onert::util::Index< uint16_t, SubgraphIndexTag > SubgraphIndex
void forceInternalOptions()
Force default values of CompilerOptions for correct compilations.
bool internal_output_alloc
void verboseOptions()
Print option value.
virtual void setIndexedRanks(std::shared_ptr< ir::OperationIndexMap< int64_t > >)=0
Set an ordering on operations.