ONE - On-device Neural Engine
Loading...
Searching...
No Matches
onert::compiler::MultiModelCompiler Class Reference final

Class to compile NN package. More...

#include <MultiModelCompiler.h>

Collaboration diagram for onert::compiler::MultiModelCompiler:

Public Member Functions

 MultiModelCompiler (const std::shared_ptr< ir::NNPkg > &nnpkg, CompilerOptions *copts)
 Construct a new Compiler object for NN package.
 
 ~MultiModelCompiler ()=default
 Destroy the MultiModelCompiler object.
 
std::shared_ptr< CompilerArtifact > compile (void)
 Do compilation with the options.
 
- Public Member Functions inherited from onert::compiler::ICompiler
virtual ~ICompiler ()=default
 Virtual ICompiler destructor.
 

Detailed Description

Class to compile NN package.

Definition at line 37 of file MultiModelCompiler.h.

Constructor & Destructor Documentation

◆ MultiModelCompiler()

onert::compiler::MultiModelCompiler::MultiModelCompiler ( const std::shared_ptr< ir::NNPkg > &  nnpkg,
CompilerOptions * copts 
)

Construct a new Compiler object for NN package.

Parameters
[in] nnpkg NN package to compile
[in] copts Compiler option for package

Definition at line 41 of file MultiModelCompiler.cc.

43 : _nnpkg{nnpkg}, _options{copts}
44{
45 // DO NOTHING
46}

◆ ~MultiModelCompiler()

onert::compiler::MultiModelCompiler::~MultiModelCompiler ( )
default

Destroy the MultiModelCompiler object.

Member Function Documentation

◆ compile()

std::shared_ptr< CompilerArtifact > onert::compiler::MultiModelCompiler::compile ( void  )
virtual

Do compilation with the options.

Returns
std::shared_ptr<CompilerArtifact> MultiModelExecutors as a result of compilation

Implements onert::compiler::ICompiler.

Definition at line 48 of file MultiModelCompiler.cc.

49{
50 /***************************************************
51 * Prepare compilation phase
52 ***************************************************/
53 {
54 if (!_options)
55 throw std::runtime_error{"Empty compile option"};
56
57 // Mode check
58 // TODO handle option for each model
59 if (_options->he_profiling_mode)
60 throw std::runtime_error("NYI: Profiling mode for multiple model is not supported yet");
61
62 _options->forceInternalOptions();
63 _options->verboseOptions();
64 }
65
66 // NYI: allow one model compilation
67 auto const model_count = _nnpkg->model_count();
68 for (uint16_t i = 0; i < model_count; i++)
69 {
70 if (!_nnpkg->model(ir::ModelIndex{i})->hasOnly<ir::Graph>())
71 throw std::runtime_error("MultiModelCompiler can only compile models for inference.");
72 }
73
74 for (uint16_t i = 0; i < model_count; i++)
75 {
76 _nnpkg->model(ir::ModelIndex{i})->iterate([&](const ir::SubgraphIndex &, ir::IGraph &graph) {
77 auto &subg = nnfw::misc::polymorphic_downcast<ir::Graph &>(graph);
78
79 // Mandatory passes
80 pass::PassRunner{}
81 .append(std::make_unique<pass::ConstantOutputPass>(subg))
82 .append(std::make_unique<pass::OddOutputPass>(subg))
83 .run();
84
85 // Optimizations
86 pass::PassRunner{}.append(std::make_unique<pass::UnusedOperandEliminationPass>(subg)).run();
87 });
88 }
89
90 /***************************************************
91 * Backend independent analysis & optimization phase
92 ***************************************************/
93 // TODO Handle dump level for each model
94 auto dump_level = static_cast<dumper::dot::DotDumper::Level>(_options->graph_dump_level);
95 onert::dumper::dot::DotDumper dot_dumper(dump_level);
96
97 // Tracing context
98 // TODO Support tracing_ctx for multiple model
99 std::unique_ptr<util::TracingCtx> tracing_ctx = nullptr;
100
101 // Model edge context: copy model edge context
102 auto model_edges = std::make_unique<ir::ModelEdges>(_nnpkg->model_edges());
103
104 // Custom kernels
105 std::unordered_map<ir::ModelIndex, std::shared_ptr<backend::custom::IKernelBuilder>>
106 custom_kernel_builders;
107 for (uint16_t i = 0; i < model_count; i++)
108 {
109 auto const model_index = ir::ModelIndex{i};
110 custom_kernel_builders[model_index] = _nnpkg->model(model_index)->getKernelBuilder();
111 }
112
113 // Lower: Assign backend
114 std::unordered_map<ir::ModelIndex,
115 std::unordered_map<ir::SubgraphIndex, std::unique_ptr<compiler::LoweredGraph>>>
116 lowered_subgs;
117
118 for (uint16_t i = 0; i < model_count; i++)
119 {
120 auto const model_index = ir::ModelIndex{i};
121 auto model = _nnpkg->model(model_index);
122
123 model->iterate([&](const ir::SubgraphIndex &subg_index, ir::IGraph &graph) {
124 auto &subg = nnfw::misc::polymorphic_downcast<ir::Graph &>(graph);
125
126 dot_dumper.dump(subg,
127 nnfw::misc::str("before_lower_model-", i, "-subg-", subg_index.value()));
128 // Lower: Assign backend
129 lowered_subgs[model_index][subg_index] =
130 std::make_unique<compiler::LoweredGraph>(subg, *_options);
131 // Set tracing_ctx for copied graph
132 if (tracing_ctx != nullptr)
133 tracing_ctx->setSubgraphIndex(&(lowered_subgs[model_index][subg_index]->graph()),
134 subg_index.value());
135 });
136 }
137
138 _nnpkg.reset();
139
140 for (const auto &pair : lowered_subgs)
141 {
142 const auto &model_index = pair.first;
143 const auto &model_lsubg = pair.second;
144
145 for (const auto &pair_inner : model_lsubg)
146 {
147 const auto &subg_index = pair_inner.first;
148 const auto &lowered_subg = pair_inner.second;
149 dot_dumper.dump(*lowered_subg, nnfw::misc::str("after_lower_model-", model_index.value(),
150 "-subg-", subg_index.value()));
151 }
152 }
153
154 // Shape inference.
155 for (auto &&pair : lowered_subgs)
156 {
157 auto &model_lsubgs = pair.second;
158 // Run the StaticShapeInfer of primary subg. All child StaticShapeInferers are called
159 // recursively
160 std::unordered_map<ir::SubgraphIndex, std::unique_ptr<StaticShapeInferer>> inferers =
161 createStaticShapeInferers(model_lsubgs);
162
163 const auto primary_subg_idx = ir::SubgraphIndex{0};
164 inferers.at(primary_subg_idx)->infer();
165
166 for (const auto &pair_inferer : inferers)
167 {
168 const auto inferer = pair_inferer.second.get();
169 inferer->dump();
170 }
171 }
172
173 // Shape validation
174 // TODO Move shape independent feature check from ShapeValidator to OperationValidator
175 // TODO Move ShapeValidator into shape inference
176 // - Check input tensor shape validation
177 // - Check parameter value validation which valid value is depend on input tensor shape
178 // - Output tensor shape validation check is needless because
179 // static/dynamic shape inferer will make valid output shape
180 for (const auto &pair : lowered_subgs)
181 {
182 const auto &model_lsubgs = pair.second;
183
184 for (const auto &pair_inner : model_lsubgs)
185 {
186 const auto &lowered_subg = pair_inner.second;
187 compiler::ShapeValidator{lowered_subg->graph()}();
188 }
189 }
190
191 /*************************************************************
192 * Backend independent analysis & optimization phase finished
193 *************************************************************/
194 auto executors = std::make_shared<exec::MultiModelExecutors>(std::move(model_edges));
195 for (auto &&pair : lowered_subgs)
196 {
197 auto const &model_index = pair.first;
198 auto &model_lsubgs = pair.second;
199
200 for (auto &&pair_inner : model_lsubgs)
201 {
202 auto const subg_index = pair_inner.first;
203 auto &lowered_subg = pair_inner.second;
204 auto const indexed_ranks = lowered_subg->indexed_ranks();
205
206 ir::OperationDumper dumper("Executor generation of Subgraph " +
207 std::to_string(subg_index.value()));
208 lowered_subg->graph().operations().iterate(
209 [&](const ir::OperationIndex &, const ir::IOperation &op) { op.accept(dumper); });
210
211 ExecutorFactoryArgs args;
212 args.tracing_ctx = tracing_ctx.get();
213 args.options = _options;
214 args.model_index = model_index;
215 args.custom_kernel_builder = custom_kernel_builders[model_index];
216 auto executor = std::unique_ptr<exec::IExecutor>{
217 ExecutorFactory::get().create(std::move(lowered_subg), executors, args)};
218 executor->setIndexedRanks(indexed_ranks);
219 executors->emplace(model_index, subg_index, std::move(executor));
220 }
221 }
222
223 /********************************
224 * Code generation phase finished
225 ********************************/
226 return std::make_shared<CompilerArtifact>(executors, std::move(tracing_ctx));
227}
exec::IExecutor * create(std::unique_ptr< compiler::LoweredGraph > lowered_graph, const std::shared_ptr< exec::IExecutors > &executors, const ExecutorFactoryArgs &args)
static ExecutorFactory & get()
args
Definition infer.py:21
IndexIterator iterate(const Shape &shape)
Create an object of IndexIterator for kernel.
std::string str(Args &&...args)
::onert::util::Index< uint32_t, OperationIndexTag > OperationIndex
Definition Index.h:32
::onert::util::Index< uint16_t, ModelIndexTag > ModelIndex
Definition Index.h:44
::onert::util::Index< uint16_t, SubgraphIndexTag > SubgraphIndex
Definition Index.h:41
void forceInternalOptions()
Force default values of CompilerOptions for correct compilations.
void verboseOptions()
Print option value.
virtual void setIndexedRanks(std::shared_ptr< ir::OperationIndexMap< int64_t > >)=0
Set an ordering on operations.

References onert::ir::IOperation::accept(), onert::compiler::pass::PassRunner::append(), onert::compiler::ExecutorFactory::create(), onert::dumper::dot::DotDumper::dump(), onert::compiler::CompilerOptions::forceInternalOptions(), onert::compiler::ExecutorFactory::get(), onert::compiler::CompilerOptions::graph_dump_level, onert::compiler::CompilerOptions::he_profiling_mode, onert::compiler::pass::PassRunner::run(), onert::exec::IExecutor::setIndexedRanks(), nnfw::misc::str(), onert::util::Index< T, DummyTag >::value(), and onert::compiler::CompilerOptions::verboseOptions().


The documentation for this class was generated from the following files: