ONE - On-device Neural Engine
onert::compiler::Compiler Class Reference [final]

Class to compile NN package. More...

#include <Compiler.h>

Collaboration diagram for onert::compiler::Compiler:

Public Member Functions

 Compiler (std::unique_ptr< ir::NNPkg > nnpkg, CompilerOptions *copts)
 Construct a new Compiler object for NN package.
 
 ~Compiler ()=default
 Destroy the Compiler object.
 
std::unique_ptr< CompilerArtifact > compile (void)
 Do compilation with the options.
 
- Public Member Functions inherited from onert::compiler::ICompiler
virtual ~ICompiler ()=default
 Virtual ICompiler destructor.
 

Detailed Description

Class to compile NN package.

Definition at line 35 of file Compiler.h.
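
A minimal usage sketch (not taken from the source) of how a caller might drive this class, assuming an ir::NNPkg and a CompilerOptions instance have already been prepared elsewhere; the helper name compilePackage is hypothetical:

#include <Compiler.h>

// Hypothetical helper; `nnpkg` and `opts` are assumed to be prepared by the caller.
std::unique_ptr<onert::compiler::CompilerArtifact>
compilePackage(std::unique_ptr<onert::ir::NNPkg> nnpkg, onert::compiler::CompilerOptions *opts)
{
  // The Compiler takes ownership of the package; `opts` is a non-owning pointer
  // that must stay valid until compile() returns.
  onert::compiler::Compiler compiler{std::move(nnpkg), opts};

  // compile() runs the passes, lowering, shape inference/validation and executor
  // generation shown in the compile() listing below, and returns the resulting artifact.
  return compiler.compile();
}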

Constructor & Destructor Documentation

◆ Compiler()

onert::compiler::Compiler::Compiler ( std::unique_ptr< ir::NNPkg > nnpkg,
                                      CompilerOptions * copts )

Construct a new Compiler object for NN package.

Parameters
    [in]  nnpkg  NN package to compile
    [in]  copts  Compiler options for the package

Definition at line 41 of file Compiler.cc.

  : _nnpkg{std::move(nnpkg)}, _options{copts}
{
  // DO NOTHING
}

◆ ~Compiler()

onert::compiler::Compiler::~Compiler ( ) [default]

Destroy the Compiler object.

Member Function Documentation

◆ compile()

std::unique_ptr< CompilerArtifact > onert::compiler::Compiler::compile ( void ) [virtual]

Do compilation with the options.

Returns
std::unique_ptr<CompilerArtifact> holding the executors produced by compilation (SingleModelExecutors for a single-model package, MultiModelExecutors otherwise) together with the tracing context

Implements onert::compiler::ICompiler.

Definition at line 66 of file Compiler.cc.

{
  /***************************************************
   * Prepare compilation phase
   ***************************************************/
  {
    if (!_options)
      throw std::runtime_error{"Empty compile option"};

    // Mode check
    // TODO handle option for each model
    if (_options->he_profiling_mode)
    {
      if (!_options->he_scheduler)
        throw std::runtime_error("Heterogeneous scheduler must be enabled during profiling.");

      if (_options->executor != "Dataflow")
        throw std::runtime_error("Profiling mode works only with 'Dataflow' executor");
    }

    _options->forceInternalOptions();
    _options->verboseOptions();
  }

  // NYI: allow one model compilation
  auto const model_count = _nnpkg->model_count();
  for (uint16_t i = 0; i < model_count; i++)
  {
    if (!_nnpkg->model(ir::ModelIndex{i})->hasOnly<ir::Graph>())
      throw std::runtime_error("Compiler can only compile models for inference.");
  }

  if (model_count > 1)
    updateOptionForMultiModel();

  for (uint16_t i = 0; i < model_count; i++)
  {
    auto model_index = ir::ModelIndex{i};
    _nnpkg->model(ir::ModelIndex{i})
      ->iterate([&](const ir::SubgraphIndex &subg_index, ir::IGraph &graph) {
        auto &subg = nnfw::misc::polymorphic_downcast<ir::Graph &>(graph);

        // Mandatory passes
        pass::PassRunner{}
          .append(std::make_unique<pass::ConstantOutputPass>(subg))
          .append(std::make_unique<pass::OddOutputPass>(subg))
          .append(
            std::make_unique<pass::PermutationIOPass>(subg, *_options, model_index, subg_index))
          .run();

        // Optimizations
        pass::PassRunner{}.append(std::make_unique<pass::UnusedOperandEliminationPass>(subg)).run();
      });
  }

  /***************************************************
   * Backend independent analysis & optimization phase
   ***************************************************/
  // TODO Handle dump level for each model
  auto dump_level = static_cast<dumper::dot::DotDumper::Level>(_options->graph_dump_level);
  onert::dumper::dot::DotDumper dot_dumper(dump_level);

  // Tracing context
  auto tracing_ctx = std::make_unique<util::TracingCtx>();

  // Model edge context: copy model edge context
  auto model_edges = std::make_unique<ir::ModelEdges>(_nnpkg->model_edges());

  // Custom kernels
  std::unordered_map<ir::ModelIndex, std::shared_ptr<backend::custom::IKernelBuilder>>
    custom_kernel_builders;
  for (uint16_t i = 0; i < model_count; i++)
  {
    auto const model_index = ir::ModelIndex{i};
    custom_kernel_builders[model_index] = _nnpkg->model(model_index)->getKernelBuilder();
  }

  // Lower: Assign backend
  std::unordered_map<ir::ModelIndex,
                     std::unordered_map<ir::SubgraphIndex, std::unique_ptr<compiler::LoweredGraph>>>
    lowered_subgs;

  for (uint16_t i = 0; i < model_count; i++)
  {
    auto const model_index = ir::ModelIndex{i};
    auto model = _nnpkg->model(model_index);

    model->iterate([&](const ir::SubgraphIndex &subg_index, ir::IGraph &graph) {
      auto &subg = nnfw::misc::polymorphic_downcast<ir::Graph &>(graph);

      dot_dumper.dump(subg,
                      nnfw::misc::str("before_lower_model-", i, "-subg-", subg_index.value()));
      // Lower: Assign backend
      lowered_subgs[model_index][subg_index] =
        std::make_unique<compiler::LoweredGraph>(subg, *_options);
      // Set tracing_ctx for copied graph
      tracing_ctx->setSubgraphIndex(&(lowered_subgs[model_index][subg_index]->graph()),
                                    {model_index, subg_index});
    });
  }

  _nnpkg.reset();

  for (const auto &[model_index, model_lsubg] : lowered_subgs)
  {
    for (const auto &[subg_index, lowered_subg] : model_lsubg)
    {
      dot_dumper.dump(*lowered_subg, nnfw::misc::str("after_lower_model-", model_index.value(),
                                                     "-subg-", subg_index.value()));
    }
  }

  // Shape inference.
  for (auto &&pair : lowered_subgs)
  {
    auto &model_lsubgs = pair.second;
    // Run the StaticShapeInfer of primary subg. All child StaticShapeInferers are called
    // recursively
    std::unordered_map<ir::SubgraphIndex, std::unique_ptr<StaticShapeInferer>> inferers =
      createStaticShapeInferers(model_lsubgs);

    const auto primary_subg_idx = ir::SubgraphIndex{0};
    inferers.at(primary_subg_idx)->infer();

    for (const auto &pair_inferer : inferers)
    {
      const auto inferer = pair_inferer.second.get();
      inferer->dump();
    }
  }

  // Shape validation
  // TODO Move shape independent feature check from ShapeValidator to OperationValidator
  // TODO Move ShapeValidator into shape inference
  //  - Check input tensor shape validation
  //  - Check parameter value validation which valid value is depend on input tensor shape
  //  - Output tensor shape validation check is needless because
  //    static/dynamic shape inferer will make valid output shape
  for (const auto &pair : lowered_subgs)
  {
    const auto &model_lsubgs = pair.second;

    for (const auto &pair_inner : model_lsubgs)
    {
      const auto &lowered_subg = pair_inner.second;
      compiler::ShapeValidator{lowered_subg->graph()}();
    }
  }

  /*************************************************************
   * Backend independent analysis & optimization phase finished
   *************************************************************/
  std::shared_ptr<exec::IExecutors> executors = nullptr;
  const auto &pkg_outputs = model_edges->pkg_outputs;
  if (model_count == 1)
    executors = std::make_shared<exec::SingleModelExecutors>();
  else
    executors = std::make_shared<exec::MultiModelExecutors>(std::move(model_edges));

  for (auto &&pair : lowered_subgs)
  {
    auto const &model_index = pair.first;
    auto &model_lsubgs = pair.second;

    for (auto &&pair_inner : model_lsubgs)
    {
      auto const subg_index = pair_inner.first;
      auto &lowered_subg = pair_inner.second;
      auto const indexed_ranks = lowered_subg->indexed_ranks();

      ir::OperationDumper dumper("Executor generation of Subgraph " +
                                 std::to_string(subg_index.value()));
      lowered_subg->graph().operations().iterate(
        [&](const ir::OperationIndex &, const ir::IOperation &op) { op.accept(dumper); });

      ExecutorFactoryArgs args;
      args.tracing_ctx = tracing_ctx.get();
      args.options = _options;
      args.model_index = model_index;
      args.custom_kernel_builder = custom_kernel_builders[model_index];
      if (_options->internal_output_alloc)
      {
        for (const auto &desc : pkg_outputs)
        {
          // Only outputs of this entry
          if (const auto &[m, s, io] = desc; m == model_index && s == subg_index)
          {
            // Map IOIndex to OperandIndex
            auto idx = lowered_subg->graph().getOutputs().at(io);
            args.internal_io_indexes.add(idx);
          }
        }
      }
      auto executor = std::unique_ptr<exec::IExecutor>{
        ExecutorFactory::get().create(std::move(lowered_subg), executors, args)};
      executor->setIndexedRanks(indexed_ranks);
      executors->emplace(model_index, subg_index, std::move(executor));
    }
  }

  /********************************
   * Code generation phase finished
   ********************************/
  return std::make_unique<CompilerArtifact>(executors, std::move(tracing_ctx));
}
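
The option checks at the top of compile() impose constraints that callers must satisfy before compiling. Below is a hedged sketch of a configuration that passes the profiling-mode checks, using the CompilerOptions fields accessed in the listing above; the helper name configureForProfiling is hypothetical, and how a CompilerOptions instance is obtained is not shown on this page.

// Sketch only: field names are those accessed in the listing above.
void configureForProfiling(onert::compiler::CompilerOptions &opts)
{
  opts.he_profiling_mode = true; // enable heterogeneous-executor profiling
  opts.he_scheduler = true;      // required: otherwise compile() throws
  opts.executor = "Dataflow";    // required: profiling works only with the 'Dataflow' executor
}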

References onert::ir::IOperation::accept(), onert::compiler::pass::PassRunner::append(), onert::compiler::ExecutorFactory::create(), onert::dumper::dot::DotDumper::dump(), onert::compiler::CompilerOptions::executor, onert::compiler::CompilerOptions::forceInternalOptions(), onert::compiler::ExecutorFactory::get(), onert::compiler::CompilerOptions::graph_dump_level, onert::compiler::CompilerOptions::he_profiling_mode, onert::compiler::CompilerOptions::he_scheduler, onert::compiler::CompilerOptions::internal_output_alloc, m, onert::compiler::pass::PassRunner::run(), onert::exec::IExecutor::setIndexedRanks(), nnfw::misc::str(), onert::util::Index< T, DummyTag >::value(), and onert::compiler::CompilerOptions::verboseOptions().


The documentation for this class was generated from the following files:
Compiler.h
Compiler.cc