ONE - On-device Neural Engine
mir2loco.cpp
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mir2loco.h"

#include "mir/ops/AddOp.h"
#include "mir/ops/AvgPool2DOp.h"
#include "mir/ops/ConcatOp.h"
#include "mir/ops/ConstantOp.h"
#include "mir/ops/Conv2DOp.h"
#include "mir/ops/Deconv2DOp.h"
#include "mir/ops/DepthwiseConv2DOp.h"
#include "mir/ops/DivOp.h"
#include "mir/ops/FullyConnectedOp.h"
#include "mir/ops/MaxPool2DOp.h"
#include "mir/ops/MulOp.h"
#include "mir/ops/ReluOp.h"
#include "mir/ops/ReshapeOp.h"
#include "mir/ops/SoftmaxOp.h"
#include "mir/ops/SubOp.h"
#include "mir/ops/TransposeOp.h"

#include "mir/ShapeRange.h"

#include <cassert>
#include <cstring>
#include <memory>

namespace mir2loco
{
namespace
{
template <class NodeType> void setupShape(const mir::Shape &shape, NodeType *node)
{
  node->rank(shape.rank());
  for (int32_t i = 0; i < shape.rank(); i++)
  {
    node->dim(i) = static_cast<uint32_t>(shape.dim(i));
  }
}

std::unique_ptr<loco::TensorShape> make_tensor_shape(const mir::Shape &shape)
{
  auto res = std::make_unique<loco::TensorShape>();
  setupShape(shape, res.get());
  return res;
}

void setupPad(const std::vector<std::int32_t> &padding_before,
              const std::vector<std::int32_t> &padding_after, loco::Padding2D *pad)
{
  assert(padding_before.size() == 2 && padding_after.size() == 2);
  pad->top(padding_before[0]);
  pad->left(padding_before[1]);
  pad->bottom(padding_after[0]);
  pad->right(padding_after[1]);
}

void setupWindow(const std::vector<std::int32_t> &window_size, loco::Window<2> *window)
{
  assert(window_size.size() == 2);
  window->vertical(window_size[0]);
  window->horizontal(window_size[1]);
}

void setupStride(const std::vector<std::int32_t> &strides, loco::Stride<2> *stride)
{
  assert(strides.size() == 2);
  stride->vertical(strides[0]);
  stride->horizontal(strides[1]);
}

loco::Permutation<loco::Domain::Feature> createFeaturePermutation(mir::DataFormat format)
{
  loco::Permutation<loco::Domain::Feature> perm;
  if (format == mir::DataFormat::NHWC)
  {
    perm.axis(loco::FeatureAxis::Count) = 0;
    perm.axis(loco::FeatureAxis::Height) = 1;
    perm.axis(loco::FeatureAxis::Width) = 2;
    perm.axis(loco::FeatureAxis::Depth) = 3;
  }
  else
  {
    assert(format == mir::DataFormat::NCHW);
    perm.axis(loco::FeatureAxis::Count) = 0;
    perm.axis(loco::FeatureAxis::Depth) = 1;
    perm.axis(loco::FeatureAxis::Height) = 2;
    perm.axis(loco::FeatureAxis::Width) = 3;
  }
  return perm;
}

std::unique_ptr<loco::FeatureEncoder> createFeatureEncoder(mir::DataFormat data_format)
{
  auto perm = createFeaturePermutation(data_format);
  return std::make_unique<loco::PermutingEncoder<loco::Domain::Feature>>(perm);
}

std::unique_ptr<loco::FeatureDecoder> createFeatureDecoder(mir::DataFormat data_format)
{
  auto perm = createFeaturePermutation(data_format);
  return std::make_unique<loco::PermutingDecoder<loco::Domain::Feature>>(perm);
}

std::unique_ptr<loco::FilterEncoder> createOHWIFilterEncoder()
{
  loco::Permutation<loco::Domain::Filter> perm;
  perm.axis(loco::FilterAxis::Count) = 0;
  perm.axis(loco::FilterAxis::Height) = 1;
  perm.axis(loco::FilterAxis::Width) = 2;
  perm.axis(loco::FilterAxis::Depth) = 3;
  return std::make_unique<loco::PermutingEncoder<loco::Domain::Filter>>(perm);
}

std::unique_ptr<loco::FilterEncoder> createHWOIFilterEncoder()
{
  loco::Permutation<loco::Domain::Filter> perm;
  perm.axis(loco::FilterAxis::Height) = 0;
  perm.axis(loco::FilterAxis::Width) = 1;
  perm.axis(loco::FilterAxis::Count) = 2;
  perm.axis(loco::FilterAxis::Depth) = 3;
  return std::make_unique<loco::PermutingEncoder<loco::Domain::Filter>>(perm);
}

std::unique_ptr<loco::DepthwiseFilterEncoder> createHWIMDepthwiseFilterEncoder()
{
  loco::Permutation<loco::Domain::DepthwiseFilter> perm;
  perm.axis(loco::DepthwiseFilterAxis::Height) = 0;
  perm.axis(loco::DepthwiseFilterAxis::Width) = 1;
  perm.axis(loco::DepthwiseFilterAxis::Depth) = 2;
  perm.axis(loco::DepthwiseFilterAxis::Multiplier) = 3;
  return std::make_unique<loco::PermutingEncoder<loco::Domain::DepthwiseFilter>>(perm);
}

std::unique_ptr<loco::DepthwiseFilterEncoder> createIHWMDepthwiseFilterEncoder()
{
  loco::Permutation<loco::Domain::DepthwiseFilter> perm;
  perm.axis(loco::DepthwiseFilterAxis::Depth) = 0;
  perm.axis(loco::DepthwiseFilterAxis::Height) = 1;
  perm.axis(loco::DepthwiseFilterAxis::Width) = 2;
  perm.axis(loco::DepthwiseFilterAxis::Multiplier) = 3;
  return std::make_unique<loco::PermutingEncoder<loco::Domain::DepthwiseFilter>>(perm);
}

std::unique_ptr<loco::MatrixEncoder> createHWMatrixEncoder()
{
  loco::Permutation<loco::Domain::Matrix> perm;
  perm.axis(loco::MatrixAxis::Height) = 0;
  perm.axis(loco::MatrixAxis::Width) = 1;
  return std::make_unique<loco::PermutingEncoder<loco::Domain::Matrix>>(perm);
}

std::unique_ptr<loco::MatrixDecoder> createHWMatrixDecoder()
{
  loco::Permutation<loco::Domain::Matrix> perm;
  perm.axis(loco::MatrixAxis::Height) = 0;
  perm.axis(loco::MatrixAxis::Width) = 1;
  return std::make_unique<loco::PermutingDecoder<loco::Domain::Matrix>>(perm);
}

loco::DataType convertDataType(mir::DataType data_type)
{
  switch (data_type)
  {
    case mir::DataType::UNKNOWN:
      return loco::DataType::Unknown;
    case mir::DataType::FLOAT32:
      return loco::DataType::FLOAT32;
    case mir::DataType::FLOAT64:
      return loco::DataType::FLOAT64;
    case mir::DataType::INT32:
      return loco::DataType::S32;
    case mir::DataType::INT64:
      return loco::DataType::S64;
    default:
      break;
  }
  throw std::runtime_error("Unsupported data type");
}

loco::Node *createBroadcastIfNeeded(loco::Node *node, const mir::Shape &shape,
                                    const mir::Shape &out_shape)
{
  auto graph = node->graph();

  if (shape == out_shape)
    return node; // not needed

  int32_t out_rank = out_shape.rank();
  int32_t rank_diff = out_rank - shape.rank();
  // Create Broadcast
  auto *broadcast = graph->nodes()->create<loco::TensorBroadcast>();
  // Create a Reshape to equalize the ranks
  if (shape.rank() != out_rank)
  {
    auto *reshape = graph->nodes()->create<loco::FixedReshape>();
    reshape->input(node);
    reshape->rank(out_rank);
    broadcast->input(reshape);
    // Set reshape dims
    for (int32_t dim = 0; dim < out_rank; dim++)
    {
      if (dim < rank_diff)
        reshape->dim(dim) = 1;
      else
        reshape->dim(dim) = shape.dim(dim - rank_diff);
    }
  }
  else
  {
    broadcast->input(node);
  }
  // Track whether every dimension is compatible for broadcasting
  bool compatible_shapes = true;
  for (int32_t dim = 0; dim < out_rank; dim++)
  {
    // Set broadcast mapping
    if (dim < rank_diff || (shape.dim(dim - rank_diff) == 1 && out_shape.dim(dim) != 1))
      broadcast->mapping()->dim(dim) = out_shape.dim(dim);
    // Check compatibility
    if (dim >= rank_diff && shape.dim(dim - rank_diff) != 1 &&
        shape.dim(dim - rank_diff) != out_shape.dim(dim))
      compatible_shapes = false;
  }
  // Check compatibility
  if (!compatible_shapes)
    throw std::runtime_error("Shapes are not compatible for broadcasting!");

  return broadcast;
}
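
// Example: broadcasting a tensor of shape [3] to out_shape [2, 3] inserts a
// FixedReshape to equalize ranks ([3] -> [1, 3]) and then maps dim 0 of the
// TensorBroadcast to 2, yielding [2, 3]. A shape such as [4] against [2, 3] is
// neither 1 nor equal to the output extent, so the compatibility check throws.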

template <typename NodeType>
NodeType *createEltwiseBinary(const mir::ops::BinaryElementwiseOp &op, loco::Node *lhs,
                              loco::Node *rhs)
{
  auto graph = lhs->graph();

  const auto &lhs_shape = op.getInput(0)->getShape();
  const auto &rhs_shape = op.getInput(1)->getShape();
  const auto &out_shape = op.getOutputShape(0);
  // Create Broadcast if it's needed
  auto lhs_node = createBroadcastIfNeeded(lhs, lhs_shape, out_shape);
  auto rhs_node = createBroadcastIfNeeded(rhs, rhs_shape, out_shape);
  // Create Node
  auto result = graph->nodes()->create<NodeType>();
  result->lhs(lhs_node);
  result->rhs(rhs_node);
  return result;
}
} // namespace

void Transformer::visit(mir::ops::AddOp &op)
{
  // Get Input
  auto lhs = _mir2loco_map.at(op.getInput(0));
  auto rhs = _mir2loco_map.at(op.getInput(1));
  auto result = createEltwiseBinary<loco::EltwiseAdd>(op, lhs, rhs);
  // Shape is not set
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), result);
}

void Transformer::visit(mir::ops::AvgPool2DOp &op)
{
  loco::Node *input = _mir2loco_map.at(op.getInput(0));

  auto *encoded_input = _loco_graph->nodes()->create<loco::FeatureEncode>();
  encoded_input->input(input);
  encoded_input->encoder(createFeatureEncoder(op.getDataFormat()));

  auto *avg_pool_node = _loco_graph->nodes()->create<loco::AvgPool2D>();
  avg_pool_node->ifm(encoded_input);
  avg_pool_node->convention(op.getIncludePad() ? loco::AvgPool2D::Convention::Full
                                               : loco::AvgPool2D::Convention::Valid);
  setupWindow(op.getWindowSize(), avg_pool_node->window());
  setupStride(op.getStrides(), avg_pool_node->stride());
  setupPad(op.getPaddingBefore(), op.getPaddingAfter(), avg_pool_node->pad());

  auto *output = _loco_graph->nodes()->create<loco::FeatureDecode>();
  output->input(avg_pool_node);
  output->decoder(createFeatureDecoder(op.getDataFormat()));

  _mir2loco_map.emplace(op.getOutput(0), output);
}
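
// Each feature-domain visitor here follows the same pattern: a FeatureEncode
// (tensor -> feature map, honoring the MIR data format), the loco operation
// itself, then a FeatureDecode back to the tensor domain.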

void Transformer::visit(mir::ops::ConcatOp &op)
{
  if (op.getNumInputs() < 2)
    throw std::runtime_error("Not enough tensors for concatenation!");

  loco::Node *last_concat = nullptr;

  for (std::size_t i = 1; i < op.getNumInputs(); i++)
  {
    loco::Node *lhs = last_concat;
    if (lhs == nullptr)
    {
      mir::Operation::Output *mir_lhs = op.getInput(i - 1);
      lhs = _mir2loco_map.at(mir_lhs);
    }
    mir::Operation::Output *mir_rhs = op.getInput(i);
    loco::Node *rhs = _mir2loco_map.at(mir_rhs);
    // Create TensorConcat
    auto concat_node = _loco_graph->nodes()->create<loco::TensorConcat>();
    // Set inputs
    concat_node->lhs(lhs);
    concat_node->rhs(rhs);
    // Set axis
    concat_node->axis(op.getAxis());
    // Set last concat
    last_concat = concat_node;
  }
  // Shape is not set
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), last_concat);
}

void Transformer::visit(mir::ops::ConstantOp &op)
{
  auto const_node = _loco_graph->nodes()->create<loco::ConstGen>();
  // Input is not set
  // Set Shape
  const auto &out_shape = op.getOutputShape(0);
  setupShape(out_shape, const_node);
  // Copy value
  const auto &value = op.getValue();
  const_node->dtype(convertDataType(value.getElementType()));
  // TODO Support other data types
  switch (const_node->dtype())
  {
    case loco::DataType::FLOAT32:
    {
      const_node->size<loco::DataType::FLOAT32>(out_shape.numElements());
      float &const_float = const_node->at<loco::DataType::FLOAT32>(0);
      char *loco_ptr = reinterpret_cast<char *>(&const_float);
      char *mir_ptr = value.at(mir::Index(out_shape.rank()));
      std::memcpy(loco_ptr, mir_ptr, out_shape.numElements() * sizeof(float));
      break;
    }
    case loco::DataType::FLOAT64:
    {
      // TODO Change this when loco supports other DataTypeImpl specializations
      const_node->dtype(loco::DataType::FLOAT32);
      const_node->size<loco::DataType::FLOAT32>(out_shape.numElements());
      float &const_float = const_node->at<loco::DataType::FLOAT32>(0);
      char *mir_ptr = value.at(mir::Index(out_shape.rank()));
      double *mir_double = reinterpret_cast<double *>(mir_ptr);
      float *loco_float = &const_float;
      for (const mir::Index &idx : mir::ShapeRange(out_shape))
      {
        *loco_float = static_cast<float>(*mir_double);
        loco_float++;
        mir_double++;
      }
      break;
    }
    case loco::DataType::S32:
    {
      const_node->size<loco::DataType::S32>(out_shape.numElements());
      int32_t &const_int32 = const_node->at<loco::DataType::S32>(0);
      char *loco_ptr = reinterpret_cast<char *>(&const_int32);
      char *mir_ptr = value.at(mir::Index(out_shape.rank()));
      std::memcpy(loco_ptr, mir_ptr, out_shape.numElements() * sizeof(int32_t));
      break;
    }
    case loco::DataType::S64:
    {
      // TODO Change this when loco supports other DataTypeImpl specializations
      const_node->dtype(loco::DataType::S32);
      const_node->size<loco::DataType::S32>(out_shape.numElements());
      int32_t &const_int32 = const_node->at<loco::DataType::S32>(0);
      char *mir_ptr = value.at(mir::Index(out_shape.rank()));
      int64_t *mir_int64 = reinterpret_cast<int64_t *>(mir_ptr);
      int32_t *loco_int32 = &const_int32;
      for (const mir::Index &idx : mir::ShapeRange(out_shape))
      {
        *loco_int32 = static_cast<int32_t>(*mir_int64);
        loco_int32++;
        mir_int64++;
      }
      break;
    }
    default:
      throw std::runtime_error("Unsupported data type");
  }
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), const_node);
}

void Transformer::visit(mir::ops::Conv2DOp &op)
{
  mir::Operation::Output *mir_input = op.getInput(0);
  mir::Operation::Output *mir_filter = op.getInput(1);

  loco::Node *input = _mir2loco_map.at(mir_input);
  loco::Node *filter = _mir2loco_map.at(mir_filter);

  // loco does not have a grouped Conv2D operation. Try to translate it into something else.
  if (op.getNumGroups() != 1)
  {
    const std::int32_t group_size = mir_filter->getShape().dim(3);
    const std::int32_t num_in_channels = group_size * op.getNumGroups();
    const std::int32_t num_out_channels = mir_filter->getShape().dim(0);

    // If the size of the group is 1, translate the operation into DepthwiseConv2D. Limit
    // ourselves to the case of 'multiplier' == 1 for now.
    if (group_size == 1 && (num_out_channels == num_in_channels))
    {
      // [O, H, W, I / group] == [I, H, W, M].
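      // Illustrative example: with 16 groups, num_in_channels == 16, and a MIR
      // filter of shape [16, H, W, 1] (group_size == 1, num_out_channels == 16),
      // the same filter data is re-read as a depthwise filter [I=16, H, W, M=1]
      // by the IHWM encoder created below.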
      auto *encoded_input = _loco_graph->nodes()->create<loco::FeatureEncode>();
      encoded_input->input(input);
      encoded_input->encoder(createFeatureEncoder(op.getDataFormat()));

      auto *encoded_filter = _loco_graph->nodes()->create<loco::DepthwiseFilterEncode>();
      encoded_filter->input(filter);
      encoded_filter->encoder(createIHWMDepthwiseFilterEncoder());

      auto *dw_conv2d_node = _loco_graph->nodes()->create<loco::DepthwiseConv2D>();
      dw_conv2d_node->ifm(encoded_input);
      dw_conv2d_node->ker(encoded_filter);
      setupStride(op.getStrides(), dw_conv2d_node->stride());
      setupPad(op.getPaddingBefore(), op.getPaddingAfter(), dw_conv2d_node->pad());

      auto *output = _loco_graph->nodes()->create<loco::FeatureDecode>();
      output->input(dw_conv2d_node);
      output->decoder(createFeatureDecoder(op.getDataFormat()));

      _mir2loco_map.emplace(op.getOutput(0), output);
    }
    else
    {
      // There are a few things we could do here:
      // 1) If group_size == 1, reshape the kernel [O, H, W, I / group] == [I * M, H, W, 1] ->
      //    [I, M, H, W] and use DepthwiseConv2D.
      // 2) Split the operation into smaller Conv2Ds.
      // 3) Replicate the filter along the 'O' axis 'num_groups' times, zero out some elements,
      //    and use an ordinary Conv2D.
      throw std::runtime_error("Grouped Conv2D operation is not fully supported.");
    }
  }
  else
  {
    auto *encoded_input = _loco_graph->nodes()->create<loco::FeatureEncode>();
    encoded_input->input(input);
    encoded_input->encoder(createFeatureEncoder(op.getDataFormat()));

    auto *encoded_filter = _loco_graph->nodes()->create<loco::FilterEncode>();
    encoded_filter->input(filter);
    encoded_filter->encoder(createOHWIFilterEncoder());

    auto *conv2d_node = _loco_graph->nodes()->create<loco::Conv2D>();
    conv2d_node->ifm(encoded_input);
    conv2d_node->ker(encoded_filter);
    setupStride(op.getStrides(), conv2d_node->stride());
    setupPad(op.getPaddingBefore(), op.getPaddingAfter(), conv2d_node->pad());

    auto *output = _loco_graph->nodes()->create<loco::FeatureDecode>();
    output->input(conv2d_node);
    output->decoder(createFeatureDecoder(op.getDataFormat()));

    _mir2loco_map.emplace(op.getOutput(0), output);
  }
}

void Transformer::visit(mir::ops::DeConv2DOp &op)
{
  mir::Operation::Output *mir_input = op.getInput(0);
  mir::Operation::Output *mir_filter = op.getInput(1);

  loco::Node *input = _mir2loco_map.at(mir_input);
  loco::Node *filter = _mir2loco_map.at(mir_filter);

  auto *encoded_input = _loco_graph->nodes()->create<loco::FeatureEncode>();
  encoded_input->input(input);
  encoded_input->encoder(createFeatureEncoder(op.getDataFormat()));

  auto *encoded_filter = _loco_graph->nodes()->create<loco::FilterEncode>();
  encoded_filter->input(filter);
  encoded_filter->encoder(createHWOIFilterEncoder());

  auto *tr_conv2d_node = _loco_graph->nodes()->create<loco::TransposedConv2D>();
  tr_conv2d_node->ifm(encoded_input);
  tr_conv2d_node->ker(encoded_filter);
  setupStride(op.getStrides(), tr_conv2d_node->stride());
  if (op.getPaddingType() == mir::ops::PaddingType::Explicit)
    setupPad(op.getPaddingBefore(), op.getPaddingAfter(), tr_conv2d_node->pad());
  else
    throw std::runtime_error("Non-explicit paddings are not supported by loco!");

  auto *output = _loco_graph->nodes()->create<loco::FeatureDecode>();
  output->input(tr_conv2d_node);
  output->decoder(createFeatureDecoder(op.getDataFormat()));

  _mir2loco_map.emplace(op.getOutput(0), output);
}

void Transformer::visit(mir::ops::DepthwiseConv2DOp &op)
{
  mir::Operation::Output *mir_input = op.getInput(0);
  mir::Operation::Output *mir_filter = op.getInput(1);

  loco::Node *input = _mir2loco_map.at(mir_input);
  loco::Node *filter = _mir2loco_map.at(mir_filter);

  auto *encoded_input = _loco_graph->nodes()->create<loco::FeatureEncode>();
  encoded_input->input(input);
  encoded_input->encoder(createFeatureEncoder(op.getDataFormat()));

  auto *encoded_filter = _loco_graph->nodes()->create<loco::DepthwiseFilterEncode>();
  encoded_filter->input(filter);
  encoded_filter->encoder(createHWIMDepthwiseFilterEncoder());

  auto *dw_conv2d_node = _loco_graph->nodes()->create<loco::DepthwiseConv2D>();
  dw_conv2d_node->ifm(encoded_input);
  dw_conv2d_node->ker(encoded_filter);
  setupStride(op.getStrides(), dw_conv2d_node->stride());
  setupPad(op.getPaddingBefore(), op.getPaddingAfter(), dw_conv2d_node->pad());

  auto *output = _loco_graph->nodes()->create<loco::FeatureDecode>();
  output->input(dw_conv2d_node);
  output->decoder(createFeatureDecoder(op.getDataFormat()));

  _mir2loco_map.emplace(op.getOutput(0), output);
}

void Transformer::visit(mir::ops::DivOp &op)
{
  // Get Input
  loco::Node *lhs = _mir2loco_map.at(op.getInput(0));
  loco::Node *rhs = _mir2loco_map.at(op.getInput(1));
  auto result = createEltwiseBinary<loco::EltwiseDiv>(op, lhs, rhs);
  // Shape is not set
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), result);
}

void Transformer::visit(mir::ops::FullyConnectedOp &op)
{
  mir::Operation::Output *mir_lhs = op.getInput(0);
  mir::Operation::Output *mir_rhs = op.getInput(1);
  // Check 2D shape
  assert(op.getInput(0)->getShape().rank() == 2);
  assert(op.getInput(1)->getShape().rank() == 2);

  loco::Node *lhs = _mir2loco_map.at(mir_lhs);
  loco::Node *rhs = _mir2loco_map.at(mir_rhs);

  auto *encoded_lhs = _loco_graph->nodes()->create<loco::MatrixEncode>();
  encoded_lhs->input(lhs);
  encoded_lhs->encoder(createHWMatrixEncoder());

  auto *encoded_rhs = _loco_graph->nodes()->create<loco::MatrixEncode>();
  encoded_rhs->input(rhs);
  encoded_rhs->encoder(createHWMatrixEncoder());

  auto *mat_mul = _loco_graph->nodes()->create<loco::MatMul>();
  mat_mul->lhs(encoded_lhs);
  mat_mul->rhs(encoded_rhs);

  auto *output = _loco_graph->nodes()->create<loco::MatrixDecode>();
  output->input(mat_mul);
  output->decoder(createHWMatrixDecoder());

  _mir2loco_map.emplace(op.getOutput(0), output);
}
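
// Example: for mir_lhs of shape [N, K] and mir_rhs of shape [K, M], the HW
// encoders map Height to axis 0 and Width to axis 1, so MatMul produces a
// matrix that decodes back to a [N, M] tensor.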

void Transformer::visit(mir::ops::InputOp &op)
{
  mir::Operation::Output *mir_output = op.getOutput(0);

  loco::GraphInput *graph_input = _loco_graph->inputs()->create();
  graph_input->name(mir_output->getName());
  graph_input->dtype(convertDataType(mir_output->getElementType()));

  auto *pull_node = _loco_graph->nodes()->create<loco::Pull>();
  setupShape(mir_output->getShape(), pull_node);

  loco::link(graph_input, pull_node);

  _mir2loco_map.emplace(mir_output, pull_node);
}

void Transformer::visit(mir::ops::MaxPool2DOp &op)
{
  loco::Node *input = _mir2loco_map.at(op.getInput(0));

  auto *encoded_input = _loco_graph->nodes()->create<loco::FeatureEncode>();
  encoded_input->input(input);
  encoded_input->encoder(createFeatureEncoder(op.getDataFormat()));

  auto max_pool_node = _loco_graph->nodes()->create<loco::MaxPool2D>();
  max_pool_node->ifm(encoded_input);
  setupWindow(op.getWindowSize(), max_pool_node->window());
  setupStride(op.getStrides(), max_pool_node->stride());
  setupPad(op.getPaddingBefore(), op.getPaddingAfter(), max_pool_node->pad());

  auto *output = _loco_graph->nodes()->create<loco::FeatureDecode>();
  output->input(max_pool_node);
  output->decoder(createFeatureDecoder(op.getDataFormat()));

  _mir2loco_map.emplace(op.getOutput(0), output);
}

void Transformer::visit(mir::ops::MulOp &op)
{
  // Get Input
  loco::Node *lhs = _mir2loco_map.at(op.getInput(0));
  loco::Node *rhs = _mir2loco_map.at(op.getInput(1));
  auto result = createEltwiseBinary<loco::EltwiseMul>(op, lhs, rhs);
  // Shape is not set
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), result);
}

void Transformer::visit(mir::ops::OutputOp &op)
{
  mir::Operation::Output *mir_input = op.getInput(0);
  loco::Node *input = _mir2loco_map.at(mir_input);

  loco::GraphOutput *graph_output = _loco_graph->outputs()->create();
  graph_output->name(mir_input->getName());
  graph_output->dtype(convertDataType(mir_input->getElementType()));
  graph_output->shape(make_tensor_shape(mir_input->getShape()));

  auto *push_node = _loco_graph->nodes()->create<loco::Push>();
  push_node->from(input);

  loco::link(graph_output, push_node);
}

void Transformer::visit(mir::ops::ReluOp &op)
{
  loco::Node *input = _mir2loco_map.at(op.getInput(0));

  auto relu_node = _loco_graph->nodes()->create<loco::ReLU>();
  relu_node->input(input);
  // Shape is not set
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), relu_node);
}

void Transformer::visit(mir::ops::ReshapeOp &op)
{
  loco::Node *input = _mir2loco_map.at(op.getInput(0));

  auto reshape_node = _loco_graph->nodes()->create<loco::Reshape<loco::ReshapeType::Fixed>>();
  reshape_node->input(input);
  // Set Shape
  auto &out_shape = op.getOutputShape(0);
  setupShape(out_shape, reshape_node);
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), reshape_node);
}

void Transformer::visit(mir::ops::SoftmaxOp &op)
{
  loco::Node *input = _mir2loco_map.at(op.getInput(0));

  auto softmax_node = _loco_graph->nodes()->create<loco::TensorSoftmax>();
  softmax_node->input(input);
  // Set Axis
  softmax_node->axis(op.getAxis());
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), softmax_node);
}

void Transformer::visit(mir::ops::SubOp &op)
{
  // Get Input
  loco::Node *lhs = _mir2loco_map.at(op.getInput(0));
  loco::Node *rhs = _mir2loco_map.at(op.getInput(1));
  auto result = createEltwiseBinary<loco::EltwiseSub>(op, lhs, rhs);
  // Shape is not set
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), result);
}

void Transformer::visit(mir::ops::TransposeOp &op)
{
  loco::Node *input = _mir2loco_map.at(op.getInput(0));
  const auto &axis_order = op.getAxisOrder();

  auto transpose_node = _loco_graph->nodes()->create<loco::TensorTranspose>();
  transpose_node->input(input);
  // Set axis order
  transpose_node->perm()->size(axis_order.size());
  for (size_t i = 0; i < axis_order.size(); i++)
    transpose_node->perm()->axis(i) = axis_order[i];
  // Shape is not set
  // Add to map
  _mir2loco_map.emplace(op.getOutput(0), transpose_node);
}

void Transformer::visit_fallback(mir::Operation &op) { throw std::runtime_error("NYI operation"); }

std::unique_ptr<loco::Graph> Transformer::transform(mir::Graph *mir_graph)
{
  _mir2loco_map.clear();
  _loco_graph.reset();
  _loco_graph = loco::make_graph();

  // Transform Nodes
  mir_graph->accept(this);

  // Validate the graph
  assert(loco::valid(_loco_graph.get()));

  return std::move(_loco_graph);
}

} // namespace mir2loco