ONE - On-device Neural Engine
Loading...
Searching...
No Matches
tflite_op_creator.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "tflite_op_creator.h"
#include "schema_generated.h"

#include "mir/ops/AddOp.h"
#include "mir/ops/AvgPool2DOp.h"
#include "mir/ops/CappedReluOp.h"
#include "mir/ops/ConcatOp.h"
#include "mir/ops/ConstantOp.h"
#include "mir/ops/Conv2DOp.h"
#include "mir/ops/Deconv2DOp.h"
#include "mir/ops/DepthwiseConv2DOp.h"
#include "mir/ops/DivOp.h"
#include "mir/ops/FullyConnectedOp.h"
#include "mir/ops/HardSwishOp.h"
#include "mir/ops/LeakyReluOp.h"
#include "mir/ops/MaxOp.h"
#include "mir/ops/MaxPool2DOp.h"
#include "mir/ops/MulOp.h"
#include "mir/ops/PadOp.h"
#include "mir/ops/ReduceMeanOp.h"
#include "mir/ops/ReluOp.h"
#include "mir/ops/ReshapeOp.h"
#include "mir/ops/ResizeOp.h"
#include "mir/ops/SigmoidOp.h"
#include "mir/ops/SliceOp.h"
#include "mir/ops/SoftmaxOp.h"
#include "mir/ops/SqrtOp.h"
#include "mir/ops/SqueezeOp.h"
#include "mir/ops/SubOp.h"
#include "mir/ops/TanhOp.h"
#include "mir/ops/TransposeOp.h"

#include "mir/Shape.h"
#include "mir/ShapeRange.h"
#include "mir/Tensor.h"

#include <stdexcept>
55namespace mir_tflite
56{
57
58namespace ops = mir::ops;
59using mir::Shape;
60
61static mir::ops::PaddingType convertPadding(tflite::Padding padding)
62{
63 switch (padding)
64 {
65 case tflite::Padding_VALID:
67 case tflite::Padding_SAME:
69 default:
70 throw std::runtime_error(std::string("Unsupported Padding: ") +
71 tflite::EnumNamePadding(padding));
72 }
73}
74
75// TODO Move this to MIR?
76static void calculatePadding(mir::ops::PaddingType padding_type, const mir::Shape &input_shape,
77 const std::vector<std::int32_t> &window_size,
78 const std::vector<std::int32_t> &strides,
79 std::vector<std::int32_t> &padding_before,
80 std::vector<std::int32_t> &padding_after)
81{
82 constexpr int num_spatial_dims = 2;
83 assert(window_size.size() == num_spatial_dims);
84 assert(strides.size() == num_spatial_dims);
85 assert(padding_before.size() == num_spatial_dims);
86 assert(padding_after.size() == num_spatial_dims);
87
88 switch (padding_type)
89 {
91 for (int i = 0; i < num_spatial_dims; ++i)
92 {
93 // Assuming NHWC format.
94 const std::int32_t total_padding =
95 (input_shape.dim(1 + i) % strides[i] == 0)
96 ? std::max(0, window_size[i] - strides[i])
97 : std::max(0, window_size[i] - input_shape.dim(1 + i) % strides[i]);
98 padding_before[i] = total_padding / 2;
99 padding_after[i] = total_padding - padding_before[i];
100 }
101 break;
103 for (int i = 0; i < num_spatial_dims; ++i)
104 {
105 padding_before[i] = 0;
106 padding_after[i] = 0;
107 }
108 break;
109 default:
110 assert(false);
111 }
112}
113
114template <typename VectorT>
115static std::vector<VectorT> convertIntTensorToVector(const mir::Tensor<int32_t> &tensor)
116{
117 std::vector<VectorT> v;
118 for (const auto &i : mir::ShapeRange(tensor.getShape()))
119 v.emplace_back(static_cast<VectorT>(tensor.at(i)));
120 return v;
121}
122
123static const mir::TensorVariant &extractTensor(const mir::Operation::Output *output)
124{
125 auto constant_op = dynamic_cast<const ops::ConstantOp *>(output->getNode());
126 if (constant_op == nullptr)
127 throw std::runtime_error("Non-constant input is not supported.");
128 return constant_op->getValue();
129}
130
131std::vector<mir::Operation::Output *>
132TFLiteOpCreator::convertConv2D(const tflite::Conv2DOptionsT *opts,
133 const std::vector<mir::Operation::Output *> &inputs)
134{
135 auto input = inputs.at(0);
136 auto kernel = inputs.at(1);
137 auto bias = inputs.at(2);
138
139 mir::Conv2DOpAttributes attributes;
140 attributes.strides = {opts->stride_h, opts->stride_w};
141
142 const auto padding_type = convertPadding(opts->padding);
143 const auto &input_shape = input->getShape();
144 const auto &kernel_shape = kernel->getShape();
145 const auto &strides = attributes.strides;
146 auto &pad_before = attributes.padding_before;
147 auto &pad_after = attributes.padding_after;
148 std::vector<std::int32_t> kernel_size{kernel_shape.dim(1), kernel_shape.dim(2)};
149 calculatePadding(padding_type, input_shape, kernel_size, strides, pad_before, pad_after);
150
152 if (input->getType().isQuantized())
153 {
154 result = createOp<ops::Conv2DOp>(input, kernel, bias, attributes)->getOutput(0);
155 }
156 else // TODO Fuse bias to other backends
157 {
158 result = createOp<ops::Conv2DOp>(input, kernel, attributes)->getOutput(0);
159 result = createOp<ops::AddOp>(result, bias)->getOutput(0);
160 }
161 return {addFusedActivation(result, opts->fused_activation_function)};
162}
163
164std::vector<mir::Operation::Output *>
165TFLiteOpCreator::convertDepthwiseConv2D(const tflite::DepthwiseConv2DOptionsT *opts,
166 const std::vector<mir::Operation::Output *> &inputs)
167{
168 auto input = inputs.at(0);
169 auto kernel = inputs.at(1);
170 auto bias = inputs.at(2);
171
172 // OHWI -> HWIO
173 const std::vector<std::size_t> axis_order{1, 2, 3, 0};
174 kernel = createOp<ops::TransposeOp>(kernel, axis_order)->getOutput(0);
175
176 mir::Conv2DOpAttributes attributes;
177 attributes.strides = {opts->stride_h, opts->stride_w};
178
179 const auto padding_type = convertPadding(opts->padding);
180 const auto &input_shape = input->getShape();
181 const auto &kernel_shape = kernel->getShape();
182 std::vector<std::int32_t> kernel_size{kernel_shape.dim(0), kernel_shape.dim(1)};
183 const auto &strides = attributes.strides;
184 auto &pad_before = attributes.padding_before;
185 auto &pad_after = attributes.padding_after;
186 calculatePadding(padding_type, input_shape, kernel_size, strides, pad_before, pad_after);
187
189 if (input->getType().isQuantized())
190 {
191 result = createOp<ops::DepthwiseConv2DOp>(input, kernel, bias, attributes)->getOutput(0);
192 }
193 else // TODO Fuse bias to other backends
194 {
195 result = createOp<ops::DepthwiseConv2DOp>(input, kernel, attributes)->getOutput(0);
196 result = createOp<ops::AddOp>(result, bias)->getOutput(0);
197 }
198 return {addFusedActivation(result, opts->fused_activation_function)};
199}
200
201std::vector<mir::Operation::Output *>
202TFLiteOpCreator::convertConcatenation(const tflite::ConcatenationOptionsT *opts,
203 const std::vector<mir::Operation::Output *> &inputs)
204{
205 auto result = createOp<ops::ConcatOp>(inputs, opts->axis);
206 return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
207}
208
209std::vector<mir::Operation::Output *>
210TFLiteOpCreator::convertMaxPool2D(const tflite::Pool2DOptionsT *opts,
211 const std::vector<mir::Operation::Output *> &inputs)
212{
213 auto input = inputs.at(0);
214
215 const auto &input_shape = input->getShape();
216
218 attributes.window = {opts->filter_height, opts->filter_width};
219 attributes.strides = {opts->stride_h, opts->stride_w};
220
221 const auto padding_type = convertPadding(opts->padding);
222 const auto &window_size = attributes.window;
223 const auto &strides = attributes.strides;
224 auto &pad_before = attributes.padding_before;
225 auto &pad_after = attributes.padding_after;
226 calculatePadding(padding_type, input_shape, window_size, strides, pad_before, pad_after);
227
228 auto result = createOp<ops::MaxPool2DOp>(input, attributes);
229 return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
230}
231
232std::vector<mir::Operation::Output *>
233TFLiteOpCreator::convertAveragePool2D(const tflite::Pool2DOptionsT *opts,
234 const std::vector<mir::Operation::Output *> &inputs)
235{
236 auto input = inputs.at(0);
237
238 const auto &input_shape = input->getShape();
239
241 attributes.window = {opts->filter_height, opts->filter_width};
242 attributes.strides = {opts->stride_h, opts->stride_w};
243 attributes.include_pad = false;
244
245 const auto padding_type = convertPadding(opts->padding);
246 const auto &window_size = attributes.window;
247 const auto &strides = attributes.strides;
248 auto &pad_before = attributes.padding_before;
249 auto &pad_after = attributes.padding_after;
250 calculatePadding(padding_type, input_shape, window_size, strides, pad_before, pad_after);
251
252 auto result = createOp<ops::AvgPool2DOp>(input, attributes);
253 return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
254}
255
256std::vector<mir::Operation::Output *>
257TFLiteOpCreator::convertSoftmax(const tflite::SoftmaxOptionsT * /*opts*/,
258 const std::vector<mir::Operation::Output *> &inputs)
259{
260 auto input = inputs.at(0);
261
262 // Softmax in TFLite is always 2-D.
263 assert(input->getShape().rank() == 2);
264 const int32_t axis = 1;
265 auto result = createOp<ops::SoftmaxOp>(input, axis);
266 return {result->getOutput(0)};
267}
268
269std::vector<mir::Operation::Output *>
270TFLiteOpCreator::convertSlice(const tflite::SliceOptionsT * /*opts*/,
271 const std::vector<mir::Operation::Output *> &inputs)
272{
273 auto input = inputs.at(0);
274 mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
275 mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(2)));
276
277 Shape starts(convertIntTensorToVector<int32_t>(begin_tensor));
278 Shape sizes(convertIntTensorToVector<int32_t>(size_tensor));
279 auto result = createOp<ops::SliceOp>(input, starts, sizes);
280 return {result->getOutput(0)};
281}
282
283std::vector<mir::Operation::Output *>
284TFLiteOpCreator::convertReshape(const tflite::ReshapeOptionsT *opts,
285 const std::vector<mir::Operation::Output *> &inputs)
286{
287 auto input = inputs.at(0);
288
289 // TODO: we should also support "-1" values in new_shape, which means that correct
290 // shape values must be calculated. Better do it in the shape inference module.
291 Shape new_shape(opts->new_shape.size());
292 for (int i = 0; i < static_cast<int>(opts->new_shape.size()); ++i)
293 {
294 new_shape.dim(i) = opts->new_shape[i];
295 }
296 auto result = createOp<ops::ReshapeOp>(input, new_shape);
297 return {result->getOutput(0)};
298}
299
300std::vector<mir::Operation::Output *>
301TFLiteOpCreator::convertTransposeConv(const tflite::TransposeConvOptionsT *opts,
302 const std::vector<mir::Operation::Output *> &inputs)
303{
304 mir::Tensor<int32_t> output_shape_tensor(extractTensor(inputs.at(0)));
305 auto kernel = inputs.at(1);
306 auto input = inputs.at(2);
307
308 mir::Deconv2DOpAttributes attributes;
309 attributes.strides = {opts->stride_h, opts->stride_w};
310 Shape output_shape(convertIntTensorToVector<int32_t>(output_shape_tensor));
311
312 // OHWI -> HWOI
313 const std::vector<std::size_t> axis_order{1, 2, 0, 3};
314 kernel = createOp<ops::TransposeOp>(kernel, axis_order)->getOutput(0);
315
316 attributes.padding_type = convertPadding(opts->padding);
317 auto result = createOp<ops::DeConv2DOp>(input, kernel, attributes, output_shape)->getOutput(0);
318 return {result};
319}
320
321std::vector<mir::Operation::Output *>
322TFLiteOpCreator::convertResizeNearestNeighbor(const tflite::ResizeNearestNeighborOptionsT *opts,
323 const std::vector<mir::Operation::Output *> &inputs)
324{
325 if (opts->align_corners)
326 throw std::runtime_error("'align_corners' is not currently supported");
327
328 auto input = inputs.at(0);
329 mir::Tensor<int32_t> size_tensor(extractTensor(inputs.at(1)));
330
331 const auto &input_shape = input->getShape();
332 Shape res_shape{input_shape.dim(0), size_tensor.at(mir::Index{0}), size_tensor.at(mir::Index{1}),
333 input_shape.dim(3)};
334 auto result =
335 createOp<ops::ResizeOp>(input, ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
336 return {result->getOutput(0)};
337}
338
339std::vector<mir::Operation::Output *>
340TFLiteOpCreator::convertAdd(const tflite::AddOptionsT *opts,
341 const std::vector<mir::Operation::Output *> &inputs)
342{
343 assert(inputs.size() == 2);
344 auto result = createOp<ops::AddOp>(inputs[0], inputs[1])->getOutput(0);
345 return {addFusedActivation(result, opts->fused_activation_function)};
346}
347
348std::vector<mir::Operation::Output *>
349TFLiteOpCreator::convertSub(const tflite::SubOptionsT *opts,
350 const std::vector<mir::Operation::Output *> &inputs)
351{
352 assert(inputs.size() == 2);
353 auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
354 return {addFusedActivation(result, opts->fused_activation_function)};
355}
356
357std::vector<mir::Operation::Output *>
358TFLiteOpCreator::convertMul(const tflite::MulOptionsT *opts,
359 const std::vector<mir::Operation::Output *> &inputs)
360{
361 assert(inputs.size() == 2);
362 auto result = createOp<ops::MulOp>(inputs[0], inputs[1])->getOutput(0);
363 return {addFusedActivation(result, opts->fused_activation_function)};
364}
365
366std::vector<mir::Operation::Output *>
367TFLiteOpCreator::convertDiv(const tflite::DivOptionsT *opts,
368 const std::vector<mir::Operation::Output *> &inputs)
369{
370 assert(inputs.size() == 2);
371 auto result = createOp<ops::DivOp>(inputs[0], inputs[1])->getOutput(0);
372 return {addFusedActivation(result, opts->fused_activation_function)};
373}
374
375std::vector<mir::Operation::Output *>
376TFLiteOpCreator::convertMax(const std::vector<mir::Operation::Output *> &inputs)
377{
378 assert(inputs.size() == 2);
379 auto result = createOp<ops::MaxOp>(inputs[0], inputs[1])->getOutput(0);
380 return {result};
381}
382
383std::vector<mir::Operation::Output *>
384TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::Operation::Output *> &inputs)
385{
386 assert(inputs.size() == 2);
387 auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
388 result = createOp<ops::MulOp>(result, result)->getOutput(0);
389 return {result};
390}
391
392std::vector<mir::Operation::Output *>
393TFLiteOpCreator::convertMean(const tflite::ReducerOptionsT *opts,
394 const std::vector<mir::Operation::Output *> &inputs)
395{
396 auto input = inputs.at(0);
397 mir::Tensor<int32_t> axes_tensor(extractTensor(inputs.at(1)));
398
399 std::vector<int32_t> axes = convertIntTensorToVector<int32_t>(axes_tensor);
400 auto result = createOp<ops::ReduceMeanOp>(input, axes, opts->keep_dims);
401 return {result->getOutput(0)};
402}
403
404std::vector<mir::Operation::Output *>
405TFLiteOpCreator::convertFullyConnected(const tflite::FullyConnectedOptionsT *opts,
406 const std::vector<mir::Operation::Output *> &inputs)
407{
408 auto input = inputs.at(0);
409 auto weights = inputs.at(1);
410 auto bias = inputs.at(2);
411
412 // Flatten input to 2-D shape.
413 const auto &input_shape = input->getShape();
414 int32_t outer_size = input_shape.dim(0);
415 int32_t inner_size = input_shape.numElements() / outer_size;
416 auto flatten = createOp<ops::ReshapeOp>(input, Shape{outer_size, inner_size})->getOutput(0);
417
418 // Transpose the weights.
419 const std::vector<std::size_t> axis_order{1, 0};
420 weights = createOp<ops::TransposeOp>(weights, axis_order)->getOutput(0);
421
423 if (input->getType().isQuantized())
424 {
425 result = createOp<ops::FullyConnectedOp>(flatten, weights, bias)->getOutput(0);
426 }
427 else // TODO Fuse bias to other backends
428 {
429 result = createOp<ops::FullyConnectedOp>(flatten, weights)->getOutput(0);
430 result = createOp<ops::AddOp>(result, bias)->getOutput(0);
431 }
432 return {addFusedActivation(result, opts->fused_activation_function)};
433}
434
436TFLiteOpCreator::addFusedActivation(mir::Operation::Output *input,
437 tflite::ActivationFunctionType activation_type)
438{
439 switch (activation_type)
440 {
441 case tflite::ActivationFunctionType_NONE:
442 return input;
443 case tflite::ActivationFunctionType_RELU:
444 return createOp<ops::ReluOp>(input)->getOutput(0);
445 case tflite::ActivationFunctionType_RELU6:
446 return createOp<ops::CappedReluOp>(input, 6)->getOutput(0);
447 case tflite::ActivationFunctionType_TANH:
448 return createOp<ops::TanhOp>(input)->getOutput(0);
449 default:
450 throw std::runtime_error(std::string("Unsupported activation type: ") +
451 tflite::EnumNameActivationFunctionType(activation_type));
452 }
453}
454
455std::vector<mir::Operation::Output *>
456TFLiteOpCreator::convertSqueeze(const tflite::SqueezeOptionsT *opts,
457 const std::vector<mir::Operation::Output *> &inputs)
458{
459 auto input = inputs.at(0);
460
461 std::vector<int32_t> squeeze_dims(opts->squeeze_dims.begin(), opts->squeeze_dims.end());
462 auto result = createOp<ops::SqueezeOp>(input, squeeze_dims);
463 return {result->getOutput(0)};
464}
465
466std::vector<mir::Operation::Output *>
467TFLiteOpCreator::convertPad(const tflite::PadOptionsT * /*opts*/,
468 const std::vector<mir::Operation::Output *> &inputs)
469{
470 auto input = inputs.at(0);
471 mir::Tensor<int32_t> paddings_tensor(extractTensor(inputs.at(1)));
472
473 const auto &input_shape = input->getShape();
474 const int num_dims = input_shape.rank();
475
476 mir::PadOpAttributes attributes(num_dims);
477 for (int i = 0; i < num_dims; i++)
478 {
479 attributes.padding_before[i] = paddings_tensor.at(mir::Index({i, 0}));
480 attributes.padding_after[i] = paddings_tensor.at(mir::Index({i, 1}));
481 }
482
483 auto result = createOp<ops::PadOp>(input, attributes)->getOutput(0);
484 return {result};
485}
486
487std::vector<mir::Operation::Output *>
488TFLiteOpCreator::convertTanh(const std::vector<mir::Operation::Output *> &inputs)
489{
490 auto input = inputs.at(0);
491
492 auto result = createOp<ops::TanhOp>(input);
493 return {result->getOutput(0)};
494}
495
496std::vector<mir::Operation::Output *>
497TFLiteOpCreator::convertReLU(const std::vector<mir::Operation::Output *> &inputs)
498{
499 auto input = inputs.at(0);
500
501 auto result = createOp<ops::ReluOp>(input);
502 return {result->getOutput(0)};
503}
504
505std::vector<mir::Operation::Output *>
506TFLiteOpCreator::convertReLU6(const std::vector<mir::Operation::Output *> &inputs)
507{
508 auto input = inputs.at(0);
509
510 auto result = createOp<ops::CappedReluOp>(input, 6);
511 return {result->getOutput(0)};
512}
513
514std::vector<mir::Operation::Output *>
515TFLiteOpCreator::convertRsqrt(const std::vector<mir::Operation::Output *> &inputs)
516{
517 auto input = inputs.at(0);
518
519 const float one_value = 1.0f;
520 mir::TensorVariant one_tensor({mir::DataType::FLOAT32, {}}, &one_value);
521 auto one = createOp<ops::ConstantOp>(one_tensor)->getOutput(0);
522 auto sqrt = createOp<ops::SqrtOp>(input)->getOutput(0);
523 auto result = createOp<ops::DivOp>(one, sqrt)->getOutput(0);
524 return {result};
525}
526
527std::vector<mir::Operation::Output *>
528TFLiteOpCreator::convertSqrt(const std::vector<mir::Operation::Output *> &inputs)
529{
530 auto input = inputs.at(0);
531
532 auto result = createOp<ops::SqrtOp>(input)->getOutput(0);
533 return {result};
534}
535
536std::vector<mir::Operation::Output *>
537TFLiteOpCreator::convertLogistic(const std::vector<mir::Operation::Output *> &inputs)
538{
539 auto input = inputs.at(0);
540
541 auto result = createOp<ops::SigmoidOp>(input);
542 return {result->getOutput(0)};
543}
544
545std::vector<mir::Operation::Output *>
546TFLiteOpCreator::convertTranspose(const tflite::TransposeOptionsT * /*opts*/,
547 const std::vector<mir::Operation::Output *> &inputs)
548{
549 auto input = inputs.at(0);
550 mir::Tensor<int32_t> perm_tensor(extractTensor(inputs.at(1)));
551
552 std::vector<std::size_t> axis_order = convertIntTensorToVector<std::size_t>(perm_tensor);
553 auto result = createOp<ops::TransposeOp>(input, axis_order);
554 return {result->getOutput(0)};
555}
556
// Converts a TFLite STRIDED_SLICE node into a MIR SliceOp followed by a
// SqueezeOp. Only a restricted subset is supported: all strides must be 1,
// and 'ellipsis_mask' / 'new_axis_mask' must be zero. 'begin_mask',
// 'end_mask' and 'shrink_axis_mask' are honored per TFLite semantics.
std::vector<mir::Operation::Output *>
TFLiteOpCreator::convertStridedSlice(const tflite::StridedSliceOptionsT *opts,
                                     const std::vector<mir::Operation::Output *> &inputs)
{
  if (opts->ellipsis_mask != 0)
    throw std::runtime_error("StridedSlice: parameter 'ellipsis_mask' is not supported.");

  if (opts->new_axis_mask != 0)
    throw std::runtime_error("StridedSlice: parameter 'new_axis_mask' is not supported.");

  // Inputs 1..3 (begin, end, strides) must be compile-time constants.
  auto input = inputs.at(0);
  mir::Tensor<int32_t> begin_tensor(extractTensor(inputs.at(1)));
  mir::Tensor<int32_t> end_tensor(extractTensor(inputs.at(2)));
  mir::Tensor<int32_t> strides_tensor(extractTensor(inputs.at(3)));

  std::vector<int32_t> begin = convertIntTensorToVector<int32_t>(begin_tensor);
  std::vector<int32_t> end = convertIntTensorToVector<int32_t>(end_tensor);
  std::vector<int32_t> strides = convertIntTensorToVector<int32_t>(strides_tensor);

  int32_t begin_mask = opts->begin_mask;
  int32_t end_mask = opts->end_mask;
  int32_t shrink_axis_mask = opts->shrink_axis_mask;

  const auto &input_shape = input->getShape();
  int32_t num_dims = input_shape.rank();

  // MIR SliceOp has no stride support, so only unit strides are accepted.
  for (int32_t stride : strides)
  {
    if (stride != 1)
      throw std::runtime_error("StridedSlice: parameter 'strides' is not supported");
  }

  // Translate per-axis begin/end + masks into static start/size shapes.
  Shape start(num_dims);
  Shape size(num_dims);
  std::vector<int32_t> squeeze_dims;
  for (int axis = 0; axis < num_dims; axis++)
  {
    // begin_mask bit set: ignore 'begin' for this axis, start at 0.
    if (static_cast<uint32_t>(begin_mask) & (1u << static_cast<uint32_t>(axis)))
      start.dim(axis) = 0;
    else
      start.dim(axis) = begin.at(static_cast<uint64_t>(axis));

    // end_mask bit set: ignore 'end' for this axis, slice to the end.
    if (static_cast<uint32_t>(end_mask) & (1u << static_cast<uint32_t>(axis)))
      size.dim(axis) = input_shape.dim(axis) - start.dim(axis);
    else
      size.dim(axis) = end.at(static_cast<uint64_t>(axis)) - start.dim(axis);

    // shrink_axis_mask bit set: remove this (size-1) axis afterwards.
    if (static_cast<uint32_t>(shrink_axis_mask) & (1u << static_cast<uint32_t>(axis)))
      squeeze_dims.push_back(axis);
  }

  auto result = createOp<ops::SliceOp>(input, start, size);
  result = createOp<ops::SqueezeOp>(result->getOutput(0), squeeze_dims);
  return {result->getOutput(0)};
}
612
613std::vector<mir::Operation::Output *>
614TFLiteOpCreator::convertLeakyReLU(const tflite::LeakyReluOptionsT *opts,
615 const std::vector<mir::Operation::Output *> &inputs)
616{
617 auto input = inputs.at(0);
618
619 auto result = createOp<ops::LeakyReluOp>(input, opts->alpha);
620 return {result->getOutput(0)};
621}
622
623std::vector<mir::Operation::Output *>
624TFLiteOpCreator::convertShape(const tflite::ShapeOptionsT *opts,
625 const std::vector<mir::Operation::Output *> &inputs)
626{
627 if (opts->out_type != tflite::TensorType_INT32)
628 {
629 throw std::runtime_error(std::string("SHAPE: Unsupported tensor type: ") +
630 EnumNameTensorType(opts->out_type));
631 }
632
633 const auto &input_shape = inputs[0]->getShape();
634 int32_t rank = input_shape.rank();
635 std::vector<int32_t> data;
636 data.reserve(static_cast<uint64_t>(rank));
637 for (int32_t i = 0; i < rank; i++)
638 data.emplace_back(input_shape.dim(i));
639 mir::TensorVariant tensor({mir::DataType::INT32, {rank}}, data.data());
640 auto result = createOp<ops::ConstantOp>(tensor);
641 return {result->getOutput(0)};
642}
643
644std::vector<mir::Operation::Output *>
645TFLiteOpCreator::convertHardSwish(const tflite::HardSwishOptionsT *,
646 const std::vector<mir::Operation::Output *> &inputs)
647{
648 auto result = createOp<ops::HardSwishOp>(inputs[0])->getOutput(0);
649 return {result};
650}
651
652} // namespace mir_tflite
Represents an output of a node.
Definition Operation.h:60
int32_t & dim(int32_t axis) noexcept
Definition Shape.h:47
int32_t numElements() const
Definition Shape.cpp:30
int32_t rank() const
Definition Shape.h:43
T at(const Index &id) const
Definition Tensor.h:31
std::vector< mir::Operation::Output * > convertSquaredDifference(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertFullyConnected(const tflite::FullyConnectedOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertMean(const tflite::ReducerOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertResizeNearestNeighbor(const tflite::ResizeNearestNeighborOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertMaxPool2D(const tflite::Pool2DOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertPad(const tflite::PadOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSoftmax(const tflite::SoftmaxOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertLeakyReLU(const tflite::LeakyReluOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertTanh(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSlice(const tflite::SliceOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertDiv(const tflite::DivOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertReLU6(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertStridedSlice(const tflite::StridedSliceOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertTransposeConv(const tflite::TransposeConvOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertReLU(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertShape(const tflite::ShapeOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertAveragePool2D(const tflite::Pool2DOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertMul(const tflite::MulOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertMax(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertConv2D(const tflite::Conv2DOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertConcatenation(const tflite::ConcatenationOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertHardSwish(const tflite::HardSwishOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertReshape(const tflite::ReshapeOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertTranspose(const tflite::TransposeOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSub(const tflite::SubOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertRsqrt(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertDepthwiseConv2D(const tflite::DepthwiseConv2DOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSqrt(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertLogistic(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSqueeze(const tflite::SqueezeOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertAdd(const tflite::AddOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
const luci_interpreter::RuntimeShape output_shape
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
int32_t size[5]
Definition Slice.cpp:35
int32_t begin[5]
Definition Slice.cpp:33
std::vector< std::int32_t > window
Definition Attributes.h:42
std::vector< std::int32_t > padding_before
Definition Attributes.h:44
std::vector< std::int32_t > padding_after
Definition Attributes.h:45
std::vector< std::int32_t > strides
Definition Attributes.h:43
std::vector< std::int32_t > padding_after
Definition Attributes.h:33
std::vector< std::int32_t > strides
Definition Attributes.h:31
std::vector< std::int32_t > padding_before
Definition Attributes.h:32
ops::PaddingType padding_type
Definition Attributes.h:69
std::vector< std::int32_t > strides
Definition Attributes.h:65
std::vector< std::int32_t > window
Definition Attributes.h:54
std::vector< std::int32_t > padding_after
Definition Attributes.h:57
std::vector< std::int32_t > strides
Definition Attributes.h:55
std::vector< std::int32_t > padding_before
Definition Attributes.h:56
std::vector< std::int32_t > padding_after
Definition Attributes.h:78
std::vector< std::int32_t > padding_before
Definition Attributes.h:77