18#include "schema_generated.h"
// Maps a tflite::Padding enum value to the importer's internal padding kind.
// NOTE(review): extraction is garbled — the function header, the switch
// statement, and the return statements for the VALID/SAME cases are missing
// from this view; only the cases and the unsupported-value throw are visible.
65 case tflite::Padding_VALID:
67 case tflite::Padding_SAME:
// Any padding mode other than VALID/SAME is rejected with its enum name.
70 throw std::runtime_error(std::string(
"Unsupported Padding: ") +
71 tflite::EnumNamePadding(padding));
// Computes explicit before/after padding for a 2-D spatial op from the window
// size and strides. The visible formula matches TensorFlow's SAME padding:
// total = max(0, window - stride) when the spatial extent divides evenly by
// the stride, else max(0, window - extent % stride); the extra odd unit of
// padding goes after (total - total/2).
// NOTE(review): garbled extract — the function name line, the branch that
// selects between the SAME computation and the zero-padding (VALID) loop,
// and the braces are missing from view; verify against the full source.
77 const std::vector<std::int32_t> &window_size,
78 const std::vector<std::int32_t> &strides,
79 std::vector<std::int32_t> &padding_before,
80 std::vector<std::int32_t> &padding_after)
// Only 2 spatial dimensions (H, W) are supported; all vectors must match.
82 constexpr int num_spatial_dims = 2;
83 assert(window_size.size() == num_spatial_dims);
84 assert(strides.size() == num_spatial_dims);
85 assert(padding_before.size() == num_spatial_dims);
86 assert(padding_after.size() == num_spatial_dims);
91 for (
int i = 0; i < num_spatial_dims; ++i)
// dim(1 + i): spatial dims start at index 1 (NHWC layout — presumably;
// TODO confirm against callers).
94 const std::int32_t total_padding =
95 (input_shape.
dim(1 + i) % strides[i] == 0)
96 ? std::max(0, window_size[i] - strides[i])
97 : std::max(0, window_size[i] - input_shape.
dim(1 + i) % strides[i]);
98 padding_before[i] = total_padding / 2;
99 padding_after[i] = total_padding - padding_before[i];
// VALID path (presumably — the selecting condition is not visible): no padding.
103 for (
int i = 0; i < num_spatial_dims; ++i)
105 padding_before[i] = 0;
106 padding_after[i] = 0;
// Copies the elements of an integer tensor into a std::vector<VectorT>,
// casting each element to the requested integral type.
// NOTE(review): garbled extract — the function signature, the loop header
// over tensor indices, and the return statement are missing from view.
114template <
typename VectorT>
117 std::vector<VectorT> v;
119 v.emplace_back(static_cast<VectorT>(
tensor.at(i)));
// Extracts the constant value feeding an operation input. Only inputs
// produced by a ConstantOp are supported (presumably via a dyn_cast that is
// not visible here — TODO confirm); anything else is rejected.
126 if (constant_op ==
nullptr)
127 throw std::runtime_error(
"Non-constant input is not supported.");
128 return constant_op->getValue();
// Converts a TFLite CONV_2D node: inputs are (input, kernel, bias).
// NOTE(review): garbled extract — the function name line, the attributes
// declaration, the pad_before/pad_after declarations, assignment of the
// computed padding into attributes, and the else keyword are not visible.
131std::vector<mir::Operation::Output *>
133 const std::vector<mir::Operation::Output *> &inputs)
135 auto input = inputs.at(0);
136 auto kernel = inputs.at(1);
137 auto bias = inputs.at(2);
140 attributes.
strides = {opts->stride_h, opts->stride_w};
142 const auto padding_type = convertPadding(opts->padding);
143 const auto &input_shape = input->getShape();
144 const auto &kernel_shape = kernel->getShape();
145 const auto &strides = attributes.
strides;
// Kernel spatial extent is taken from dims 1 and 2 of the kernel shape.
148 std::vector<std::int32_t> kernel_size{kernel_shape.dim(1), kernel_shape.dim(2)};
149 calculatePadding(padding_type, input_shape, kernel_size, strides, pad_before, pad_after);
// Quantized path: bias is folded into the Conv2DOp itself; float path
// creates the conv then adds the bias with a separate AddOp.
152 if (input->getType().isQuantized())
154 result = createOp<ops::Conv2DOp>(input, kernel, bias, attributes)->getOutput(0);
158 result = createOp<ops::Conv2DOp>(input, kernel, attributes)->getOutput(0);
159 result = createOp<ops::AddOp>(result, bias)->getOutput(0);
// The TFLite fused activation (RELU/RELU6/TANH/...) is appended last.
161 return {addFusedActivation(result, opts->fused_activation_function)};
// Converts a TFLite DEPTHWISE_CONV_2D node: inputs are (input, kernel, bias).
// NOTE(review): garbled extract — the function name line, attribute/padding
// declarations, the padding assignment, and the else keyword are not visible.
164std::vector<mir::Operation::Output *>
166 const std::vector<mir::Operation::Output *> &inputs)
168 auto input = inputs.at(0);
169 auto kernel = inputs.at(1);
170 auto bias = inputs.at(2);
// Reorder kernel axes {1,2,3,0}: moves TFLite's leading axis of the
// depthwise kernel to the back for the importer's expected layout.
173 const std::vector<std::size_t> axis_order{1, 2, 3, 0};
174 kernel = createOp<ops::TransposeOp>(kernel, axis_order)->getOutput(0);
177 attributes.
strides = {opts->stride_h, opts->stride_w};
179 const auto padding_type = convertPadding(opts->padding);
180 const auto &input_shape = input->getShape();
181 const auto &kernel_shape = kernel->getShape();
// After the transpose the kernel's spatial dims are 0 and 1.
182 std::vector<std::int32_t> kernel_size{kernel_shape.dim(0), kernel_shape.dim(1)};
183 const auto &strides = attributes.
strides;
186 calculatePadding(padding_type, input_shape, kernel_size, strides, pad_before, pad_after);
// Quantized path folds the bias into the op; float path adds it separately.
189 if (input->getType().isQuantized())
191 result = createOp<ops::DepthwiseConv2DOp>(input, kernel, bias, attributes)->getOutput(0);
195 result = createOp<ops::DepthwiseConv2DOp>(input, kernel, attributes)->getOutput(0);
196 result = createOp<ops::AddOp>(result, bias)->getOutput(0);
198 return {addFusedActivation(result, opts->fused_activation_function)};
// Converts a TFLite CONCATENATION node: concatenates all inputs along
// opts->axis, then applies the fused activation to the single output.
// NOTE(review): the function name line is not visible in this extract.
201std::vector<mir::Operation::Output *>
203 const std::vector<mir::Operation::Output *> &inputs)
205 auto result = createOp<ops::ConcatOp>(inputs, opts->axis);
206 return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
// Converts a TFLite MAX_POOL_2D node: pooling window and strides come from
// the options; padding is computed from the padding mode and input shape.
// NOTE(review): garbled extract — the function name line, the attributes and
// pad_before/pad_after declarations, and the padding assignment are missing.
209std::vector<mir::Operation::Output *>
211 const std::vector<mir::Operation::Output *> &inputs)
213 auto input = inputs.at(0);
215 const auto &input_shape = input->getShape();
218 attributes.
window = {opts->filter_height, opts->filter_width};
219 attributes.
strides = {opts->stride_h, opts->stride_w};
221 const auto padding_type = convertPadding(opts->padding);
222 const auto &window_size = attributes.
window;
223 const auto &strides = attributes.
strides;
226 calculatePadding(padding_type, input_shape, window_size, strides, pad_before, pad_after);
228 auto result = createOp<ops::MaxPool2DOp>(input, attributes);
229 return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
// Converts a TFLite AVERAGE_POOL_2D node; mirrors convertMaxPool2D but
// creates an AvgPool2DOp.
// NOTE(review): garbled extract — the function name line, the attributes and
// pad_before/pad_after declarations, and the padding assignment are missing.
232std::vector<mir::Operation::Output *>
234 const std::vector<mir::Operation::Output *> &inputs)
236 auto input = inputs.at(0);
238 const auto &input_shape = input->getShape();
241 attributes.
window = {opts->filter_height, opts->filter_width};
242 attributes.
strides = {opts->stride_h, opts->stride_w};
245 const auto padding_type = convertPadding(opts->padding);
246 const auto &window_size = attributes.
window;
247 const auto &strides = attributes.
strides;
250 calculatePadding(padding_type, input_shape, window_size, strides, pad_before, pad_after);
252 auto result = createOp<ops::AvgPool2DOp>(input, attributes);
253 return {addFusedActivation(result->getOutput(0), opts->fused_activation_function)};
// Converts a TFLite SOFTMAX node. Only rank-2 input is supported here; the
// softmax is always taken over axis 1 (the non-batch axis).
// NOTE(review): the function name line is not visible in this extract.
256std::vector<mir::Operation::Output *>
258 const std::vector<mir::Operation::Output *> &inputs)
260 auto input = inputs.at(0);
263 assert(input->getShape().rank() == 2);
264 const int32_t axis = 1;
265 auto result = createOp<ops::SoftmaxOp>(input, axis);
266 return {result->getOutput(0)};
// Converts a TFLite SLICE node: begin and size come from constant tensors
// (extracted into begin_tensor/size_tensor on lines not visible here —
// TODO confirm) and are turned into Shape starts/sizes for a SliceOp.
269std::vector<mir::Operation::Output *>
271 const std::vector<mir::Operation::Output *> &inputs)
273 auto input = inputs.at(0);
277 Shape starts(convertIntTensorToVector<int32_t>(begin_tensor));
278 Shape sizes(convertIntTensorToVector<int32_t>(size_tensor));
279 auto result = createOp<ops::SliceOp>(input, starts, sizes);
280 return {result->getOutput(0)};
// Converts a TFLite RESHAPE node using the new_shape vector from the
// options (the shape-tensor input, if any, is not consulted here).
// NOTE(review): the function name line and loop braces are not visible.
283std::vector<mir::Operation::Output *>
285 const std::vector<mir::Operation::Output *> &inputs)
287 auto input = inputs.at(0);
// Copy opts->new_shape element-by-element into a Shape of matching rank.
291 Shape new_shape(opts->new_shape.size());
292 for (
int i = 0; i < static_cast<int>(opts->new_shape.size()); ++i)
294 new_shape.
dim(i) = opts->new_shape[i];
296 auto result = createOp<ops::ReshapeOp>(input, new_shape);
297 return {result->getOutput(0)};
// Converts a TFLite TRANSPOSE_CONV node. Note the input order differs from
// CONV_2D: index 1 is the kernel and index 2 is the data input (index 0 is
// the output-shape tensor, read on a line not visible here — TODO confirm).
// NOTE(review): garbled extract — the function name, the attributes
// declaration, and the return statement are missing from view.
300std::vector<mir::Operation::Output *>
302 const std::vector<mir::Operation::Output *> &inputs)
305 auto kernel = inputs.at(1);
306 auto input = inputs.at(2);
309 attributes.
strides = {opts->stride_h, opts->stride_w};
// Reorder kernel axes {1,2,0,3} into the layout DeConv2DOp expects.
313 const std::vector<std::size_t> axis_order{1, 2, 0, 3};
314 kernel = createOp<ops::TransposeOp>(kernel, axis_order)->getOutput(0);
// DeConv2DOp takes the padding mode directly plus the explicit output shape.
316 attributes.
padding_type = convertPadding(opts->padding);
317 auto result = createOp<ops::DeConv2DOp>(input, kernel, attributes,
output_shape)->getOutput(0);
// Converts a TFLite RESIZE_NEAREST_NEIGHBOR node. 'align_corners' is
// explicitly rejected. The target spatial size comes from a constant tensor
// and is combined with batch/channel dims into res_shape on lines not
// visible in this extract — TODO confirm.
321std::vector<mir::Operation::Output *>
323 const std::vector<mir::Operation::Output *> &inputs)
325 if (opts->align_corners)
326 throw std::runtime_error(
"'align_corners' is not currently supported");
328 auto input = inputs.at(0);
331 const auto &input_shape = input->getShape();
335 createOp<ops::ResizeOp>(input, ops::ResizeOp::ResizeMethod::nearestNeighbor, res_shape);
336 return {result->getOutput(0)};
// The four elementwise binary converters (ADD, SUB, MUL, DIV) share the same
// shape: exactly two inputs, one binary op, then the fused activation.
// NOTE(review): the function name lines are not visible in this extract.
339std::vector<mir::Operation::Output *>
341 const std::vector<mir::Operation::Output *> &inputs)
343 assert(inputs.size() == 2);
344 auto result = createOp<ops::AddOp>(inputs[0], inputs[1])->getOutput(0);
345 return {addFusedActivation(result, opts->fused_activation_function)};
348std::vector<mir::Operation::Output *>
350 const std::vector<mir::Operation::Output *> &inputs)
352 assert(inputs.size() == 2);
353 auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
354 return {addFusedActivation(result, opts->fused_activation_function)};
357std::vector<mir::Operation::Output *>
359 const std::vector<mir::Operation::Output *> &inputs)
361 assert(inputs.size() == 2);
362 auto result = createOp<ops::MulOp>(inputs[0], inputs[1])->getOutput(0);
363 return {addFusedActivation(result, opts->fused_activation_function)};
366std::vector<mir::Operation::Output *>
368 const std::vector<mir::Operation::Output *> &inputs)
370 assert(inputs.size() == 2);
371 auto result = createOp<ops::DivOp>(inputs[0], inputs[1])->getOutput(0);
372 return {addFusedActivation(result, opts->fused_activation_function)};
// MAXIMUM: elementwise max of two inputs. SQUARED_DIFFERENCE: (a-b)*(a-b),
// built as a SubOp followed by a self-multiplication.
// NOTE(review): garbled extract — function name lines, parameter lists, and
// both return statements are missing from view.
375std::vector<mir::Operation::Output *>
378 assert(inputs.size() == 2);
379 auto result = createOp<ops::MaxOp>(inputs[0], inputs[1])->getOutput(0);
383std::vector<mir::Operation::Output *>
386 assert(inputs.size() == 2);
387 auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
388 result = createOp<ops::MulOp>(result, result)->getOutput(0);
// Converts a TFLite MEAN (reduce) node: reduction axes come from a constant
// tensor (axes_tensor is extracted on a line not visible here — TODO
// confirm); keep_dims is forwarded from the reducer options.
392std::vector<mir::Operation::Output *>
394 const std::vector<mir::Operation::Output *> &inputs)
396 auto input = inputs.at(0);
399 std::vector<int32_t> axes = convertIntTensorToVector<int32_t>(axes_tensor);
400 auto result = createOp<ops::ReduceMeanOp>(input, axes, opts->keep_dims);
401 return {result->getOutput(0)};
// Converts a TFLite FULLY_CONNECTED node: flattens the input to 2-D
// (batch x everything-else), transposes the weights, then applies the op.
// NOTE(review): garbled extract — the function name line and the else
// keyword on the float path are missing from view.
404std::vector<mir::Operation::Output *>
406 const std::vector<mir::Operation::Output *> &inputs)
408 auto input = inputs.at(0);
409 auto weights = inputs.at(1);
410 auto bias = inputs.at(2);
// Flatten: outer = dim(0) (batch), inner = remaining element count.
413 const auto &input_shape = input->getShape();
414 int32_t outer_size = input_shape.
dim(0);
415 int32_t inner_size = input_shape.
numElements() / outer_size;
416 auto flatten = createOp<ops::ReshapeOp>(input,
Shape{outer_size, inner_size})->getOutput(0);
// TFLite stores weights as (out, in); transpose {1,0} to (in, out).
419 const std::vector<std::size_t> axis_order{1, 0};
420 weights = createOp<ops::TransposeOp>(weights, axis_order)->getOutput(0);
// Quantized path folds the bias into the op; float path adds it separately.
423 if (input->getType().isQuantized())
425 result = createOp<ops::FullyConnectedOp>(flatten, weights, bias)->getOutput(0);
429 result = createOp<ops::FullyConnectedOp>(flatten, weights)->getOutput(0);
430 result = createOp<ops::AddOp>(result, bias)->getOutput(0);
432 return {addFusedActivation(result, opts->fused_activation_function)};
// Appends the TFLite fused activation to an op's output: RELU, RELU6
// (capped ReLU at 6), or TANH. NONE presumably returns the input unchanged
// (its return line, original 442, is not visible here — TODO confirm).
// Any other activation type is rejected with its enum name.
437 tflite::ActivationFunctionType activation_type)
439 switch (activation_type)
441 case tflite::ActivationFunctionType_NONE:
443 case tflite::ActivationFunctionType_RELU:
444 return createOp<ops::ReluOp>(input)->getOutput(0);
445 case tflite::ActivationFunctionType_RELU6:
446 return createOp<ops::CappedReluOp>(input, 6)->getOutput(0);
447 case tflite::ActivationFunctionType_TANH:
448 return createOp<ops::TanhOp>(input)->getOutput(0);
450 throw std::runtime_error(std::string(
"Unsupported activation type: ") +
451 tflite::EnumNameActivationFunctionType(activation_type));
// Converts a TFLite SQUEEZE node: the axes to squeeze are copied straight
// from opts->squeeze_dims.
// NOTE(review): the function name line is not visible in this extract.
455std::vector<mir::Operation::Output *>
457 const std::vector<mir::Operation::Output *> &inputs)
459 auto input = inputs.at(0);
461 std::vector<int32_t> squeeze_dims(opts->squeeze_dims.begin(), opts->squeeze_dims.end());
462 auto result = createOp<ops::SqueezeOp>(input, squeeze_dims);
463 return {result->getOutput(0)};
// Converts a TFLite PAD node.
// NOTE(review): heavily garbled — the function name, the paddings-tensor
// extraction, the attributes declaration, the per-dimension loop body that
// fills the padding attributes (original lines ~478-482), and the return
// statement are all missing from this view; only the skeleton remains.
466std::vector<mir::Operation::Output *>
468 const std::vector<mir::Operation::Output *> &inputs)
470 auto input = inputs.at(0);
473 const auto &input_shape = input->getShape();
474 const int num_dims = input_shape.
rank();
477 for (
int i = 0; i < num_dims; i++)
483 auto result = createOp<ops::PadOp>(input, attributes)->getOutput(0);
// Unary activation converters: TANH, RELU, and RELU6 (capped ReLU at 6).
// Each takes input 0 and wraps it in the corresponding single op.
// NOTE(review): function name lines and parameter lists are not visible.
487std::vector<mir::Operation::Output *>
490 auto input = inputs.at(0);
492 auto result = createOp<ops::TanhOp>(input);
493 return {result->getOutput(0)};
496std::vector<mir::Operation::Output *>
499 auto input = inputs.at(0);
501 auto result = createOp<ops::ReluOp>(input);
502 return {result->getOutput(0)};
505std::vector<mir::Operation::Output *>
508 auto input = inputs.at(0);
510 auto result = createOp<ops::CappedReluOp>(input, 6);
511 return {result->getOutput(0)};
// RSQRT is built as 1.0 / sqrt(x) using a constant-one tensor (one_tensor is
// constructed from one_value on a line not visible here — TODO confirm).
// SQRT and LOGISTIC (sigmoid) are direct single-op wrappers.
// NOTE(review): function name lines and the return statements for the Rsqrt
// and Sqrt converters are missing from this extract.
514std::vector<mir::Operation::Output *>
517 auto input = inputs.at(0);
519 const float one_value = 1.0f;
521 auto one = createOp<ops::ConstantOp>(one_tensor)->getOutput(0);
522 auto sqrt = createOp<ops::SqrtOp>(input)->getOutput(0);
523 auto result = createOp<ops::DivOp>(one, sqrt)->getOutput(0);
527std::vector<mir::Operation::Output *>
530 auto input = inputs.at(0);
532 auto result = createOp<ops::SqrtOp>(input)->getOutput(0);
536std::vector<mir::Operation::Output *>
539 auto input = inputs.at(0);
541 auto result = createOp<ops::SigmoidOp>(input);
542 return {result->getOutput(0)};
// Converts a TFLite TRANSPOSE node: the permutation comes from a constant
// tensor (perm_tensor is extracted on a line not visible here — TODO
// confirm) and is converted to size_t axis indices.
545std::vector<mir::Operation::Output *>
547 const std::vector<mir::Operation::Output *> &inputs)
549 auto input = inputs.at(0);
552 std::vector<std::size_t> axis_order = convertIntTensorToVector<std::size_t>(perm_tensor);
553 auto result = createOp<ops::TransposeOp>(input, axis_order);
554 return {result->getOutput(0)};
// Converts a TFLite STRIDED_SLICE node into SliceOp + SqueezeOp.
// Unsupported masks (ellipsis_mask, new_axis_mask) are rejected up front;
// begin/end/strides come from constant tensors. Per-axis: begin_mask bit set
// means "start from 0" (the visible code assigns begin.at(axis) only when the
// bit is SET — NOTE(review): that inverts the usual TFLite mask semantics;
// the branch for the unset case is not visible, so confirm against the full
// source). end_mask similarly selects slicing to the end of the axis, and
// shrink_axis_mask collects axes to squeeze afterwards.
// NOTE(review): garbled extract — the function name, the stride != 1 check
// condition, the Shape size declaration, and several else lines are missing.
557std::vector<mir::Operation::Output *>
559 const std::vector<mir::Operation::Output *> &inputs)
561 if (opts->ellipsis_mask != 0)
562 throw std::runtime_error(
"StridedSlice: parameter 'ellipsis_mask' is not supported.");
564 if (opts->new_axis_mask != 0)
565 throw std::runtime_error(
"StridedSlice: parameter 'new_axis_mask' is not supported.");
567 auto input = inputs.at(0);
572 std::vector<int32_t>
begin = convertIntTensorToVector<int32_t>(begin_tensor);
573 std::vector<int32_t> end = convertIntTensorToVector<int32_t>(end_tensor);
574 std::vector<int32_t> strides = convertIntTensorToVector<int32_t>(strides_tensor);
576 int32_t begin_mask = opts->begin_mask;
577 int32_t end_mask = opts->end_mask;
578 int32_t shrink_axis_mask = opts->shrink_axis_mask;
580 const auto &input_shape = input->getShape();
581 int32_t num_dims = input_shape.
rank();
// Only unit strides are supported (the condition line is not visible).
583 for (int32_t stride : strides)
586 throw std::runtime_error(
"StridedSlice: parameter 'strides' is not supported");
589 Shape start(num_dims);
591 std::vector<int32_t> squeeze_dims;
592 for (
int axis = 0; axis < num_dims; axis++)
594 if (
static_cast<uint32_t
>(begin_mask) & (1u <<
static_cast<uint32_t
>(axis)))
597 start.
dim(axis) =
begin.at(
static_cast<uint64_t
>(axis));
599 if (
static_cast<uint32_t
>(end_mask) & (1u <<
static_cast<uint32_t
>(axis)))
// Masked end: take everything from start to the end of this axis.
600 size.dim(axis) = input_shape.
dim(axis) - start.
dim(axis);
602 size.dim(axis) = end.at(
static_cast<uint64_t
>(axis)) - start.
dim(axis);
604 if (
static_cast<uint32_t
>(shrink_axis_mask) & (1u <<
static_cast<uint32_t
>(axis)))
605 squeeze_dims.push_back(axis);
// Slice first, then squeeze away the shrink axes.
608 auto result = createOp<ops::SliceOp>(input, start,
size);
609 result = createOp<ops::SqueezeOp>(result->getOutput(0), squeeze_dims);
610 return {result->getOutput(0)};
// Converts a TFLite LEAKY_RELU node, forwarding the alpha slope from options.
// NOTE(review): the function name line is not visible in this extract.
613std::vector<mir::Operation::Output *>
615 const std::vector<mir::Operation::Output *> &inputs)
617 auto input = inputs.at(0);
619 auto result = createOp<ops::LeakyReluOp>(input, opts->alpha);
620 return {result->getOutput(0)};
// Converts a TFLite SHAPE node: materializes the input's shape as a constant
// INT32 tensor (only TensorType_INT32 output is supported). The collected
// dims are wrapped into 'tensor' on a line not visible here — TODO confirm.
623std::vector<mir::Operation::Output *>
625 const std::vector<mir::Operation::Output *> &inputs)
627 if (opts->out_type != tflite::TensorType_INT32)
629 throw std::runtime_error(std::string(
"SHAPE: Unsupported tensor type: ") +
630 EnumNameTensorType(opts->out_type))\u003b
633 const auto &input_shape = inputs[0]->getShape();
634 int32_t rank = input_shape.
rank();
635 std::vector<int32_t> data;
636 data.reserve(
static_cast<uint64_t
>(rank));
637 for (int32_t i = 0; i < rank; i++)
638 data.emplace_back(input_shape.
dim(i));
640 auto result = createOp<ops::ConstantOp>(tensor);
641 return {result->getOutput(0)};
644std::vector<mir::Operation::Output *>
646 const std::vector<mir::Operation::Output *> &inputs)
648 auto result = createOp<ops::HardSwishOp>(inputs[0])->getOutput(0);
Represents an output of a node.
int32_t & dim(int32_t axis) noexcept
int32_t numElements() const
T at(const Index &id) const
std::vector< mir::Operation::Output * > convertSquaredDifference(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertFullyConnected(const tflite::FullyConnectedOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertMean(const tflite::ReducerOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertResizeNearestNeighbor(const tflite::ResizeNearestNeighborOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertMaxPool2D(const tflite::Pool2DOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertPad(const tflite::PadOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSoftmax(const tflite::SoftmaxOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertLeakyReLU(const tflite::LeakyReluOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertTanh(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSlice(const tflite::SliceOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertDiv(const tflite::DivOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertReLU6(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertStridedSlice(const tflite::StridedSliceOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertTransposeConv(const tflite::TransposeConvOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertReLU(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertShape(const tflite::ShapeOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertAveragePool2D(const tflite::Pool2DOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertMul(const tflite::MulOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertMax(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertConv2D(const tflite::Conv2DOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertConcatenation(const tflite::ConcatenationOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertHardSwish(const tflite::HardSwishOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertReshape(const tflite::ReshapeOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertTranspose(const tflite::TransposeOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSub(const tflite::SubOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertRsqrt(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertDepthwiseConv2D(const tflite::DepthwiseConv2DOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSqrt(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertLogistic(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertSqueeze(const tflite::SqueezeOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertAdd(const tflite::AddOptionsT *opts, const std::vector< mir::Operation::Output * > &inputs)
const luci_interpreter::RuntimeShape output_shape
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
std::vector< std::int32_t > window
std::vector< std::int32_t > padding_before
std::vector< std::int32_t > padding_after
std::vector< std::int32_t > strides
std::vector< std::int32_t > padding_after
std::vector< std::int32_t > strides
std::vector< std::int32_t > padding_before
ops::PaddingType padding_type
std::vector< std::int32_t > strides
std::vector< std::int32_t > window
std::vector< std::int32_t > padding_after
std::vector< std::int32_t > strides
std::vector< std::int32_t > padding_before
std::vector< std::int32_t > padding_after
std::vector< std::int32_t > padding_before