ONE - On-device Neural Engine
nnc::ModelAnalyzer Class Reference

Constructs the inference sequence for a given computational graph and gathers the list of variables used in the artifact. More...

#include <ModelAnalyzer.h>

Collaboration diagram for nnc::ModelAnalyzer:

Public Member Functions

void analyze (const mir::Graph *g)
 Constructs the inference sequence.
 
void visit (mir::ops::AbsOp &) override
 
void visit (mir::ops::AddOp &op) override
 
void visit (mir::ops::AvgPool2DOp &op) override
 
void visit (mir::ops::BroadcastOp &op) override
 
void visit (mir::ops::CappedReluOp &op) override
 
void visit (mir::ops::ConcatOp &op) override
 
void visit (mir::ops::ConstantOp &op) override
 
void visit (mir::ops::Conv2DOp &op) override
 
void visit (mir::ops::DeConv2DOp &op) override
 
void visit (mir::ops::DepthwiseConv2DOp &op) override
 
void visit (mir::ops::DivOp &op) override
 
void visit (mir::ops::EluOp &op) override
 
void visit (mir::ops::FullyConnectedOp &op) override
 
void visit (mir::ops::GatherOp &op) override
 
void visit (mir::ops::InputOp &op) override
 
void visit (mir::ops::LeakyReluOp &op) override
 
void visit (mir::ops::MaxOp &op) override
 
void visit (mir::ops::MaxPool2DOp &op) override
 
void visit (mir::ops::MulOp &op) override
 
void visit (mir::ops::OutputOp &op) override
 
void visit (mir::ops::PadOp &op) override
 
void visit (mir::ops::ReduceMeanOp &op) override
 
void visit (mir::ops::ReluOp &op) override
 
void visit (mir::ops::ReshapeOp &op) override
 
void visit (mir::ops::ResizeOp &op) override
 
void visit (mir::ops::SigmoidOp &op) override
 
void visit (mir::ops::SliceOp &op) override
 
void visit (mir::ops::SoftmaxOp &op) override
 
void visit (mir::ops::SqrtOp &op) override
 
void visit (mir::ops::SqueezeOp &op) override
 
void visit (mir::ops::SubOp &op) override
 
void visit (mir::ops::TanhOp &op) override
 
void visit (mir::ops::TransposeOp &op) override
 
const std::vector< size_t > & getInputs () const
 
const std::vector< size_t > & getPersistentTensors () const
 
const std::vector< size_t > & getOutputs () const
 
const std::vector< sir::TensorDescriptor > & getTensors () const
 
const std::vector< std::unique_ptr< sir::Action > > & getInferenceSequence () const
 
std::vector< std::unique_ptr< sir::Action > > & getInferenceSequence ()
 
const std::string & getModelName () const
 
size_t getMaxTemporarySize () const
 
size_t getTempTID () const
 
- Public Member Functions inherited from mir::IVisitor
virtual ~IVisitor ()=default
 

Protected Member Functions

void visit_fallback (mir::Operation &op) override
 

Detailed Description

Constructs the inference sequence for a given computational graph and gathers the list of variables used in the artifact.

Definition at line 41 of file ModelAnalyzer.h.
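
The class is typically driven by a backend such as nnc::CPPCodeGenerator: analyze() is called once on a finished mir::Graph, after which the getters expose the linearized inference sequence and the gathered tensor ids. The following is a minimal usage sketch, not taken from this page; it assumes ModelAnalyzer is default-constructible and that "ModelAnalyzer.h" is on the include path.

#include "ModelAnalyzer.h"

// Hedged sketch of a typical caller, assuming `graph` is an already-built mir::Graph.
void linearize(const mir::Graph *graph)
{
  nnc::ModelAnalyzer analyzer;
  analyzer.analyze(graph); // builds the inference sequence and gathers tensor ids

  const auto &inputs = analyzer.getInputs();   // ids of network input tensors
  const auto &outputs = analyzer.getOutputs(); // ids of network output tensors
  const auto &tensors = analyzer.getTensors(); // descriptors for all tensors

  // Linearized sequence of sir::Action objects, ready for code emission.
  for (const auto &action : analyzer.getInferenceSequence())
  {
    (void)action; // a backend would inspect each action here and emit code for it
  }
  (void)inputs; (void)outputs; (void)tensors;
}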

Member Function Documentation

◆ analyze()

void nnc::ModelAnalyzer::analyze ( const mir::Graph * g)

Constructs the inference sequence.

Parameters
g    pointer to the graph to linearize

Definition at line 231 of file ModelAnalyzer.cpp.

{
  // Current path through graph
  stack<pair<Operation *, size_t>> s;
  // Nodes in Reverse Post Order stored by DFS
  vector<Operation *> post_order;
  // Set contains pointer to node if it is visited by DFS
  set<Operation *> visited;

  vector<Operation *> init_ops;
  for (Operation *op : g->getNodes())
  {
    if (op->getNumInputs() == 0)
    {
      init_ops.emplace_back(op);
    }
  }

  // Register temporary tensor for im2col buffer
  _temp_tensor_id = declareTemporaryTensor();

  // Walk all network inputs
  for (Operation *in : init_ops)
  {
    if (!visited.count(in))
    {
      visited.insert(in);
      s.push({in, 0});
    }

    // main DFS loop
    while (!s.empty())
    {
      // top stores current node and current outgoing edge from it
      auto &top = s.top();
      Operation *node = top.first;
      auto edge = top.second++;
      // FIXME Refactor me.
      std::vector<Operation *> next_nodes;
      for (const auto &out : node->getOutputs())
      {
        const auto &uses = out.getUses();
        std::transform(uses.begin(), uses.end(), std::back_inserter(next_nodes),
                       [](Operation::Use use) { return use.getNode(); });
      }
      if (edge == next_nodes.size())
      {
        // this node is fully analyzed, push it into RPO and pop from stack
        post_order.push_back(node);
        s.pop();
      }
      else
      {
        // Search current outgoing edge
        Operation *successor = next_nodes[edge];
        if (!visited.count(successor))
        {
          visited.insert(successor);
          s.push({next_nodes[edge], 0});
        }
      }
    }
  }

  constructInferenceSequence(post_order);

  collectOutputs(g);
}

References mir::Operation::getOutputs().

Referenced by nnc::CPPCodeGenerator::run(), and TEST().
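
The traversal above is an iterative depth-first search that starts from every operation with no inputs (graph inputs and constants) and appends a node to post_order only once all of its successors have been explored; reversing that order yields a topological order of the graph, which is presumably what constructInferenceSequence() consumes (the helper itself is not shown on this page). The standalone sketch below, which is illustrative and not nnc code, shows the same stack-of-(node, next-edge-index) pattern on a toy DAG.

#include <cstddef>
#include <iostream>
#include <set>
#include <stack>
#include <utility>
#include <vector>

int main()
{
  // Toy DAG: 0 -> 1 -> 3 and 0 -> 2 -> 3, given as successor lists.
  const std::vector<std::vector<int>> succ = {{1, 2}, {3}, {3}, {}};

  std::vector<int> post_order;
  std::set<int> visited;
  std::stack<std::pair<int, std::size_t>> s;

  visited.insert(0);
  s.push({0, 0});
  while (!s.empty())
  {
    auto &top = s.top();
    int node = top.first;
    std::size_t edge = top.second++; // index of the next unexplored outgoing edge
    if (edge == succ[node].size())
    {
      post_order.push_back(node); // all successors explored
      s.pop();
    }
    else
    {
      int next = succ[node][edge];
      if (!visited.count(next))
      {
        visited.insert(next);
        s.push({next, 0});
      }
    }
  }

  // Reverse post order is a topological order; this prints "0 2 1 3".
  for (auto it = post_order.rbegin(); it != post_order.rend(); ++it)
    std::cout << *it << ' ';
  std::cout << '\n';
}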

◆ getInferenceSequence() [1/2]

std::vector< std::unique_ptr< sir::Action > > & nnc::ModelAnalyzer::getInferenceSequence ( )
inline
Returns
Inference sequence

Definition at line 115 of file ModelAnalyzer.h.

{ return _inferenceSequence; }

◆ getInferenceSequence() [2/2]

const std::vector< std::unique_ptr< sir::Action > > & nnc::ModelAnalyzer::getInferenceSequence ( ) const
inline
Returns
Inference sequence

Definition at line 107 of file ModelAnalyzer.h.

{
  return _inferenceSequence;
}

Referenced by nnc::CPPCodeGenerator::run(), and TEST().

◆ getInputs()

const std::vector< size_t > & nnc::ModelAnalyzer::getInputs ( ) const
inline
Returns
vector of id's of network input tensors

Definition at line 87 of file ModelAnalyzer.h.

{ return _inputs; }

◆ getMaxTemporarySize()

size_t nnc::ModelAnalyzer::getMaxTemporarySize ( ) const
inline

Definition at line 122 of file ModelAnalyzer.h.

{ return _max_temp_size; }

◆ getModelName()

const std::string & nnc::ModelAnalyzer::getModelName ( ) const
inline
Returns
Model name, taken from Model IR

Definition at line 120 of file ModelAnalyzer.h.

{ return _modelName; }

◆ getOutputs()

const std::vector< size_t > & nnc::ModelAnalyzer::getOutputs ( ) const
inline
Returns
vector of id's of network output tensors

Definition at line 97 of file ModelAnalyzer.h.

{ return _outputs; }

◆ getPersistentTensors()

const std::vector< size_t > & nnc::ModelAnalyzer::getPersistentTensors ( ) const
inline
Returns
vector of id's of tensors with unique names taken from Model IR

Definition at line 92 of file ModelAnalyzer.h.

{ return _persistent_tensors; }

◆ getTempTID()

size_t nnc::ModelAnalyzer::getTempTID ( ) const
inline

Definition at line 124 of file ModelAnalyzer.h.

{ return _temp_tensor_id; }
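
Neither getTempTID() nor getMaxTemporarySize() is documented here, but together they describe the single scratch (im2col) tensor registered in analyze(): getTempTID() returns its id and getMaxTemporarySize() the largest element count any operation requested for it. A backend would presumably reserve that much storage once and reuse it; the sketch below illustrates that assumption (the float element type is a guess, not stated on this page).

#include <vector>

#include "ModelAnalyzer.h"

// Hedged sketch: pre-allocate the shared scratch buffer that conv2d /
// convTransposed2d actions reference via getTempTID().
std::vector<float> allocateScratch(const nnc::ModelAnalyzer &analyzer)
{
  return std::vector<float>(analyzer.getMaxTemporarySize());
}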

◆ getTensors()

const std::vector< sir::TensorDescriptor > & nnc::ModelAnalyzer::getTensors ( ) const
inline
Returns
vector of all network tensors

Definition at line 102 of file ModelAnalyzer.h.

{ return _tensors; }

◆ visit() [1/33]

void nnc::ModelAnalyzer::visit ( mir::ops::AbsOp & op)
override

Definition at line 423 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "absFN"); }

◆ visit() [2/33]

void nnc::ModelAnalyzer::visit ( mir::ops::AddOp & op)
override

Definition at line 425 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "ElementWise<Add>");
}

◆ visit() [3/33]

void nnc::ModelAnalyzer::visit ( mir::ops::AvgPool2DOp & op)
override

Definition at line 320 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "avgPool"); }

◆ visit() [4/33]

void nnc::ModelAnalyzer::visit ( mir::ops::BroadcastOp & op)
override

Definition at line 329 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "broadcast"); }

◆ visit() [5/33]

void nnc::ModelAnalyzer::visit ( mir::ops::CappedReluOp & op)
override

Definition at line 331 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "cappedRelu"); }

◆ visit() [6/33]

void nnc::ModelAnalyzer::visit ( mir::ops::ConcatOp & op)
override

Definition at line 300 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "concat"); }

◆ visit() [7/33]

void nnc::ModelAnalyzer::visit ( mir::ops::ConstantOp & op)
override

Definition at line 339 of file ModelAnalyzer.cpp.

{
  assert(op.getNumInputs() == 0);

  // FIXME This is to work around deserializeTensors not being able to deserialize tensors of type
  // other than float32.
  const auto *output = op.getOutput(0);
  if (output->getUses().empty())
    return;

  appendOperationToInference(&op, "constant");
}

References mir::Operation::getNumInputs(), and mir::Operation::getOutput().

◆ visit() [8/33]

void nnc::ModelAnalyzer::visit ( mir::ops::Conv2DOp & op)
override

Definition at line 302 of file ModelAnalyzer.cpp.

{
  assert(op.getNumGroups() == 1);
  const auto &kernel_shape = op.getInputShape(1);
  const auto &out_shape = op.getOutputShape(0);
  const int32_t tmp_size = kernel_shape.dim(1) * kernel_shape.dim(2) * kernel_shape.dim(3) *
                           out_shape.dim(0) * out_shape.dim(1) * out_shape.dim(2);
  updateMaxTemporarySize(static_cast<size_t>(tmp_size));
  appendOperationToInference(&op, "conv2d", {_temp_tensor_id});
}

References mir::Shape::dim(), mir::Operation::getInputShape(), mir::ops::Conv2DOp::getNumGroups(), and mir::Operation::getOutputShape().
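
The temporary size requested here is an im2col buffer: dims 1..3 of the kernel shape appear to be kernel height, kernel width and input channels, and dims 0..2 of the output shape batch, output height and output width (this dim layout is inferred, not stated on this page). A worked example with hypothetical shapes:

#include <cstdint>
#include <iostream>

int main()
{
  const int32_t kernel[4] = {16, 3, 3, 8}; // hypothetical 3x3 kernel, 8 input / 16 output channels
  const int32_t out[4] = {1, 32, 32, 16};  // hypothetical output: N=1, 32x32, 16 channels

  const int32_t tmp_size = kernel[1] * kernel[2] * kernel[3]  // 3 * 3 * 8  = 72
                           * out[0] * out[1] * out[2];        // 1 * 32 * 32 = 1024
  std::cout << tmp_size << '\n';                              // 72 * 1024 = 73728 elements
}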

◆ visit() [9/33]

void nnc::ModelAnalyzer::visit ( mir::ops::DeConv2DOp & op)
override

Definition at line 386 of file ModelAnalyzer.cpp.

{
  const auto &kernel_shape = op.getInputShape(1);
  const auto &out_shape = op.getOutputShape(0);
  const int32_t tmp_size = kernel_shape.dim(0) * kernel_shape.dim(1) * kernel_shape.dim(3) *
                           out_shape.dim(0) * out_shape.dim(1) * out_shape.dim(2);
  updateMaxTemporarySize(static_cast<size_t>(tmp_size));
  appendOperationToInference(&op, "convTransposed2d", {_temp_tensor_id});
}

References mir::Shape::dim(), mir::Operation::getInputShape(), and mir::Operation::getOutputShape().

◆ visit() [10/33]

void nnc::ModelAnalyzer::visit ( mir::ops::DepthwiseConv2DOp & op)
override

Definition at line 313 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "depthwiseConv2d");
}

◆ visit() [11/33]

void nnc::ModelAnalyzer::visit ( mir::ops::DivOp & op)
override

Definition at line 430 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "ElementWise<Div>");
}

◆ visit() [12/33]

void nnc::ModelAnalyzer::visit ( mir::ops::EluOp & op)
override

Definition at line 384 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "elu"); }

◆ visit() [13/33]

void nnc::ModelAnalyzer::visit ( mir::ops::FullyConnectedOp & op)
override

Definition at line 324 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "fullConnect");
}

◆ visit() [14/33]

void nnc::ModelAnalyzer::visit ( mir::ops::GatherOp & op)
override

Definition at line 412 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "gather"); }

◆ visit() [15/33]

void nnc::ModelAnalyzer::visit ( mir::ops::InputOp & op)
override

Definition at line 333 of file ModelAnalyzer.cpp.

{
  assert(op.getNumInputs() == 0);
  appendOperationToInference(&op, "in");
}

References mir::Operation::getNumInputs().

◆ visit() [16/33]

void nnc::ModelAnalyzer::visit ( mir::ops::LeakyReluOp & op)
override

Definition at line 416 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "leakyRelu");
}

◆ visit() [17/33]

void nnc::ModelAnalyzer::visit ( mir::ops::MaxOp & op)
override

Definition at line 435 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "ElementWise<Max>");
}

◆ visit() [18/33]

void nnc::ModelAnalyzer::visit ( mir::ops::MaxPool2DOp & op)
override

Definition at line 322 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "maxPool"); }

◆ visit() [19/33]

void nnc::ModelAnalyzer::visit ( mir::ops::MulOp & op)
override

Definition at line 440 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "ElementWise<Mul>");
}

◆ visit() [20/33]

void nnc::ModelAnalyzer::visit ( mir::ops::OutputOp & op)
override

Definition at line 421 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "out"); }

◆ visit() [21/33]

void nnc::ModelAnalyzer::visit ( mir::ops::PadOp & op)
override

Definition at line 400 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "pad"); }

◆ visit() [22/33]

void nnc::ModelAnalyzer::visit ( mir::ops::ReduceMeanOp & op)
override

Definition at line 402 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "reduceMean");
}

◆ visit() [23/33]

void nnc::ModelAnalyzer::visit ( mir::ops::ReluOp & op)
override

Definition at line 352 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "relu"); }

◆ visit() [24/33]

void nnc::ModelAnalyzer::visit ( mir::ops::ReshapeOp & op)
override

Definition at line 354 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "reshape"); }

◆ visit() [25/33]

void nnc::ModelAnalyzer::visit ( mir::ops::ResizeOp & op)
override

Definition at line 356 of file ModelAnalyzer.cpp.

{
  const auto &in_shape = op.getInputShape(0);
  const auto &out_shape = op.getOutputShape(0);

  assert(in_shape.rank() == 4);
  assert(in_shape.rank() == out_shape.rank());

  if (in_shape.dim(0) != out_shape.dim(0) || in_shape.dim(3) != out_shape.dim(3))
    throw std::runtime_error("Not supported Resize on other dims besides height and width!");

  switch (op.getMode())
  {
    case mir::ops::ResizeOp::ResizeMethod::nearestNeighbor:
      appendOperationToInference(&op, "resize");
      break;
    default:
      assert(false && "Not Implemented!");
  }
}

References mir::Operation::getInputShape(), mir::ops::ResizeOp::getMode(), mir::Operation::getOutputShape(), and mir::ops::ResizeOp::nearestNeighbor.

◆ visit() [26/33]

void nnc::ModelAnalyzer::visit ( mir::ops::SigmoidOp & op)
override

Definition at line 414 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "sigmoid"); }

◆ visit() [27/33]

void nnc::ModelAnalyzer::visit ( mir::ops::SliceOp & op)
override

Definition at line 377 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "slice"); }

◆ visit() [28/33]

void nnc::ModelAnalyzer::visit ( mir::ops::SoftmaxOp & op)
override

Definition at line 318 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "softmax"); }

◆ visit() [29/33]

void nnc::ModelAnalyzer::visit ( mir::ops::SqrtOp & op)
override

Definition at line 398 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "sqrtFN"); }

◆ visit() [30/33]

void nnc::ModelAnalyzer::visit ( mir::ops::SqueezeOp & op)
override

Definition at line 396 of file ModelAnalyzer.cpp.

{ appendOperationToInference(&op, "reshape"); }

◆ visit() [31/33]

void nnc::ModelAnalyzer::visit ( mir::ops::SubOp & op)
override

Definition at line 445 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "ElementWise<Sub>");
}

◆ visit() [32/33]

void nnc::ModelAnalyzer::visit ( mir::ops::TanhOp & op)
override

Definition at line 379 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "tanhActivation");
}

◆ visit() [33/33]

void nnc::ModelAnalyzer::visit ( mir::ops::TransposeOp & op)
override

Definition at line 407 of file ModelAnalyzer.cpp.

{
  appendOperationToInference(&op, "transpose");
}

◆ visit_fallback()

void nnc::ModelAnalyzer::visit_fallback ( mir::Operation & op)
override protected virtual

Reimplemented from mir::Visitor.

Definition at line 450 of file ModelAnalyzer.cpp.

{ throw std::runtime_error("NYI operation"); }
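
Any operation type without a dedicated visit() overload falls through to visit_fallback() and aborts analysis with this exception. A caller that may receive graphs containing unsupported operations can therefore guard analyze(); this is a minimal sketch of that pattern, not code from this page.

#include <iostream>
#include <stdexcept>

#include "ModelAnalyzer.h"

// Hedged sketch: report graphs that ModelAnalyzer cannot yet linearize.
bool tryAnalyze(nnc::ModelAnalyzer &analyzer, const mir::Graph *graph)
{
  try
  {
    analyzer.analyze(graph);
    return true;
  }
  catch (const std::runtime_error &e)
  {
    std::cerr << "unsupported operation: " << e.what() << '\n';
    return false;
  }
}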

The documentation for this class was generated from the following files: