ONE - On-device Neural Engine
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
onert::compiler::MultiModelCompiler Class Reference (final)

Class to compile NN package. More...

#include <MultiModelCompiler.h>

Collaboration diagram for onert::compiler::MultiModelCompiler:

Public Member Functions

 MultiModelCompiler (const std::shared_ptr< ir::NNPkg > &nnpkg, CompilerOptions *copts)
 Construct a new Compiler object for NN package.
 
 ~MultiModelCompiler ()=default
 Destroy the MultiModelCompiler object.
 
std::shared_ptr< CompilerArtifact > compile (void)
 Do compilation with the options.
 
- Public Member Functions inherited from onert::compiler::ICompiler
virtual ~ICompiler ()=default
 Virtual ICompiler destructor.
 

Detailed Description

Class to compile NN package.

Definition at line 35 of file MultiModelCompiler.h.

Constructor & Destructor Documentation

◆ MultiModelCompiler()

onert::compiler::MultiModelCompiler::MultiModelCompiler ( const std::shared_ptr< ir::NNPkg > &  nnpkg,
CompilerOptions * copts 
)

Construct a new Compiler object for NN package.

Parameters
[in] nnpkg NN package to compile
[in] copts Compiler option for package

Definition at line 39 of file MultiModelCompiler.cc.

41 : _nnpkg{nnpkg}, _options{copts}
42{
43 // DO NOTHING
44}

◆ ~MultiModelCompiler()

onert::compiler::MultiModelCompiler::~MultiModelCompiler ( )
default

Destroy the MultiModelCompiler object.

Member Function Documentation

◆ compile()

std::shared_ptr< CompilerArtifact > onert::compiler::MultiModelCompiler::compile ( void  )
virtual

Do compilation with the options.

Returns
std::shared_ptr<CompilerArtifact> containing MultiModelExecutors as the result of compilation

Implements onert::compiler::ICompiler.

Definition at line 46 of file MultiModelCompiler.cc.

47{
48 /***************************************************
49 * Prepare compilation phase
50 ***************************************************/
51 {
52 if (!_options)
53 throw std::runtime_error{"Empty compile option"};
54
55 // Mode check
56 // TODO handle option for each model
57 if (_options->he_profiling_mode)
58 throw std::runtime_error("NYI: Profiling mode for multiple model is not supported yet");
59
60 _options->forceInternalOptions();
61 _options->verboseOptions();
62 }
63
64 // NYI: allow one model compilation
65 auto const model_count = _nnpkg->model_count();
66 for (uint16_t i = 0; i < model_count; i++)
67 {
68 if (!_nnpkg->model(ir::ModelIndex{i})->hasOnly<ir::Graph>())
69 throw std::runtime_error("MultiModelCompiler can only compile models for inference.");
70 }
71
72 for (uint16_t i = 0; i < model_count; i++)
73 {
74 _nnpkg->model(ir::ModelIndex{i})->iterate([&](const ir::SubgraphIndex &, ir::IGraph &graph) {
75 auto &subg = nnfw::misc::polymorphic_downcast<ir::Graph &>(graph);
76
77 // Mandatory passes
78 pass::PassRunner{}
79 .append(std::make_unique<pass::ConstantOutputPass>(subg))
80 .append(std::make_unique<pass::OddOutputPass>(subg))
81 .run();
82
83 // Optimizations
84 pass::PassRunner{}.append(std::make_unique<pass::UnusedOperandEliminationPass>(subg)).run();
85 });
86 }
87
88 /***************************************************
89 * Backend independent analysis & optimization phase
90 ***************************************************/
91 // TODO Handle dump level for each model
92 auto dump_level = static_cast<dumper::dot::DotDumper::Level>(_options->graph_dump_level);
93 onert::dumper::dot::DotDumper dot_dumper(dump_level);
94
95 // Tracing context
96 // TODO Support tracing_ctx for multiple model
97 std::unique_ptr<util::TracingCtx> tracing_ctx = nullptr;
98
99 // Model edge context: copy model edge context
100 auto model_edges = std::make_unique<ir::ModelEdges>(_nnpkg->model_edges());
101
102 // Custom kernels
103 std::unordered_map<ir::ModelIndex, std::shared_ptr<backend::custom::IKernelBuilder>>
104 custom_kernel_builders;
105 for (uint16_t i = 0; i < model_count; i++)
106 {
107 auto const model_index = ir::ModelIndex{i};
108 custom_kernel_builders[model_index] = _nnpkg->model(model_index)->getKernelBuilder();
109 }
110
111 // Lower: Assign backend
112 std::unordered_map<ir::ModelIndex,
113 std::unordered_map<ir::SubgraphIndex, std::unique_ptr<compiler::LoweredGraph>>>
114 lowered_subgs;
115
116 for (uint16_t i = 0; i < model_count; i++)
117 {
118 auto const model_index = ir::ModelIndex{i};
119 auto model = _nnpkg->model(model_index);
120
121 model->iterate([&](const ir::SubgraphIndex &subg_index, ir::IGraph &graph) {
122 auto &subg = nnfw::misc::polymorphic_downcast<ir::Graph &>(graph);
123
124 dot_dumper.dump(subg,
125 nnfw::misc::str("before_lower_model-", i, "-subg-", subg_index.value()));
126 // Lower: Assign backend
127 lowered_subgs[model_index][subg_index] =
128 std::make_unique<compiler::LoweredGraph>(subg, *_options);
129 // Set tracing_ctx for copied graph
130 if (tracing_ctx != nullptr)
131 tracing_ctx->setSubgraphIndex(&(lowered_subgs[model_index][subg_index]->graph()),
132 subg_index.value());
133 });
134 }
135
136 _nnpkg.reset();
137
138 for (const auto &[model_index, model_lsubg] : lowered_subgs)
139 {
140 for (const auto &[subg_index, lowered_subg] : model_lsubg)
141 {
142 dot_dumper.dump(*lowered_subg, nnfw::misc::str("after_lower_model-", model_index.value(),
143 "-subg-", subg_index.value()));
144 }
145 }
146
147 // Shape inference.
148 for (auto &&pair : lowered_subgs)
149 {
150 auto &model_lsubgs = pair.second;
151 // Run the StaticShapeInfer of primary subg. All child StaticShapeInferers are called
152 // recursively
153 std::unordered_map<ir::SubgraphIndex, std::unique_ptr<StaticShapeInferer>> inferers =
154 createStaticShapeInferers(model_lsubgs);
155
156 const auto primary_subg_idx = ir::SubgraphIndex{0};
157 inferers.at(primary_subg_idx)->infer();
158
159 for (const auto &pair_inferer : inferers)
160 {
161 const auto inferer = pair_inferer.second.get();
162 inferer->dump();
163 }
164 }
165
166 // Shape validation
167 // TODO Move shape independent feature check from ShapeValidator to OperationValidator
168 // TODO Move ShapeValidator into shape inference
169 // - Check input tensor shape validation
170 // - Check parameter value validation which valid value is depend on input tensor shape
171 // - Output tensor shape validation check is needless because
172 // static/dynamic shape inferer will make valid output shape
173 for (const auto &pair : lowered_subgs)
174 {
175 const auto &model_lsubgs = pair.second;
176
177 for (const auto &pair_inner : model_lsubgs)
178 {
179 const auto &lowered_subg = pair_inner.second;
180 compiler::ShapeValidator{lowered_subg->graph()}();
181 }
182 }
183
184 /*************************************************************
185 * Backend independent analysis & optimization phase finished
186 *************************************************************/
187 auto executors = std::make_shared<exec::MultiModelExecutors>(std::move(model_edges));
188 for (auto &&pair : lowered_subgs)
189 {
190 auto const &model_index = pair.first;
191 auto &model_lsubgs = pair.second;
192
193 for (auto &&pair_inner : model_lsubgs)
194 {
195 auto const subg_index = pair_inner.first;
196 auto &lowered_subg = pair_inner.second;
197 auto const indexed_ranks = lowered_subg->indexed_ranks();
198
199 ir::OperationDumper dumper("Executor generation of Subgraph " +
200 std::to_string(subg_index.value()));
201 lowered_subg->graph().operations().iterate(
202 [&](const ir::OperationIndex &, const ir::IOperation &op) { op.accept(dumper); });
203
204 ExecutorFactoryArgs args;
205 args.tracing_ctx = tracing_ctx.get();
206 args.options = _options;
207 args.model_index = model_index;
208 args.custom_kernel_builder = custom_kernel_builders[model_index];
209 auto executor = std::unique_ptr<exec::IExecutor>{
210 ExecutorFactory::get().create(std::move(lowered_subg), executors, args)};
211 executor->setIndexedRanks(indexed_ranks);
212 executors->emplace(model_index, subg_index, std::move(executor));
213 }
214 }
215
216 /********************************
217 * Code generation phase finished
218 ********************************/
219 return std::make_shared<CompilerArtifact>(executors, std::move(tracing_ctx));
220}
exec::IExecutor * create(std::unique_ptr< compiler::LoweredGraph > lowered_graph, const std::shared_ptr< exec::IExecutors > &executors, const ExecutorFactoryArgs &args)
static ExecutorFactory & get()
args
Definition infer.py:21
IndexIterator iterate(const Shape &shape)
Create an object of IndexIterator for kernel.
std::string str(Args &&...args)
::onert::util::Index< uint32_t, OperationIndexTag > OperationIndex
Definition Index.h:30
::onert::util::Index< uint16_t, ModelIndexTag > ModelIndex
Definition Index.h:42
::onert::util::Index< uint16_t, SubgraphIndexTag > SubgraphIndex
Definition Index.h:39
void forceInternalOptions()
Force default values of CompilerOptions for correct compilations.
void verboseOptions()
Print option value.
virtual void setIndexedRanks(std::shared_ptr< ir::OperationIndexMap< int64_t > >)=0
Set an ordering on operations.

References onert::ir::IOperation::accept(), onert::compiler::pass::PassRunner::append(), onert::compiler::ExecutorFactory::create(), onert::dumper::dot::DotDumper::dump(), onert::compiler::CompilerOptions::forceInternalOptions(), onert::compiler::ExecutorFactory::get(), onert::compiler::CompilerOptions::graph_dump_level, onert::compiler::CompilerOptions::he_profiling_mode, onert::compiler::pass::PassRunner::run(), onert::exec::IExecutor::setIndexedRanks(), nnfw::misc::str(), onert::util::Index< T, DummyTag >::value(), and onert::compiler::CompilerOptions::verboseOptions().


The documentation for this class was generated from the following files: