ONE - On-device Neural Engine
Loading...
Searching...
No Matches
mir_onnx Namespace Reference

Data Structures

class  ConverterContext
 
class  ModelContext
 
class  NodeConverterRegistry
 

Functions

std::unique_ptr< mir::Graph > importModelFromBinaryFile (const std::string &filename)
 
std::unique_ptr< mir::Graph > importModelFromTextFile (const std::string &filename)
 
std::unique_ptr< mir::Graph > loadModel (const std::string &filename)
 
template<typename T >
T getAttributeValue (const onnx::AttributeProto &attribute)=delete
 
template<>
float getAttributeValue (const onnx::AttributeProto &attribute)
 
template<>
std::int64_t getAttributeValue (const onnx::AttributeProto &attribute)
 
template<>
std::string getAttributeValue (const onnx::AttributeProto &attribute)
 
template<>
onnx::TensorProto getAttributeValue (const onnx::AttributeProto &attribute)
 
template<>
std::vector< std::int32_t > getAttributeValue (const onnx::AttributeProto &attribute)
 
template<>
std::vector< std::int64_t > getAttributeValue (const onnx::AttributeProto &attribute)
 
const onnx::AttributeProto * findAttribute (const onnx::NodeProto &node, const std::string &name)
 
template<typename T >
T getAttributeValue (const onnx::NodeProto &node, const std::string &name)
 
template<typename T >
T getAttributeValue (const onnx::NodeProto &node, const std::string &name, T default_value)
 
void inferAutoPadding (const std::string &pad_type, const mir::Shape &input_shape, const std::vector< std::int32_t > &dilations, const std::vector< std::int32_t > &strides, const std::vector< std::int32_t > &window_size, std::vector< std::int32_t > &padding_before, std::vector< std::int32_t > &padding_after)
 
std::vector< std::int32_t > fixPads (const mir::Shape &input_shape, const std::vector< std::int32_t > &pads, const std::vector< std::int32_t > &strides, const std::vector< std::int32_t > &dilation, const std::vector< std::int32_t > &kernel_shape)
 
mir::Shape constantToShape (const mir::ops::ConstantOp *op)
 
mir::DataType onnxDataTypeToMirDataType (onnx::TensorProto::DataType type)
 
mir::TensorVariant createTensor (const onnx::TensorProto *tensor)
 
mir::Operation * foldConstants (mir::Graph *graph, mir::Operation *op)
 
template<typename OpType , typename... Types>
mir::Operation * createOp (mir::Graph *graph, Types &&...args)
 
void registerSupportedOps ()
 
void convertAbsV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertAbsV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertAddV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertAddV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertAddV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertAveragePoolV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertAveragePoolV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertAveragePoolV10 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertBatchNormalizationV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertBatchNormalizationV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertBatchNormalizationV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertBatchNormalizationV9 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertConcatV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertConcatV4 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertConstantV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertConstantV9 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertConstantV11 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertConvV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertConvTransposeV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertDivV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertDropoutV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertDropoutV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertDropoutV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertDropoutV10 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertEqualV11 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertEqualV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertExpandV8 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertFlattenV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertFlattenV9 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertGatherV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertGemmV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertGemmV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertGemmV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertGemmV9 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertGemmV11 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertGlobalAveragePoolV2 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertGreaterV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertGreaterV9 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertIdentityV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertLessV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertLessV9 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertMatMulV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertMatMulV9 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertMaxV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertMaxV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertMaxV8 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertMaxPoolV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertMaxPoolV8 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertMaxPoolV10 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertMulV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertPadAttrName (const std::string &pad_attr_name, const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertPadV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertPadV2 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertReciprocalV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertReciprocalV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertReduceMeanV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertReluV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertReluV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertReshapeV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertReshapeV5 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertShapeV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertSigmoidV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertSigmoidV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertSoftmaxV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertSqrtV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertSqrtV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertSubV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertSubV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertSubV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertSumV8 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertTanhV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertTanhV6 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertTransposeV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertUnsqueezeV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertUpsampleV1 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertUpsampleV7 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 
void convertUpsampleV9 (const onnx::NodeProto &onnx_node, ConverterContext *context)
 

Variables

const int64_t firstUnknownOpset = 13
 

Function Documentation

◆ constantToShape()

mir::Shape mir_onnx::constantToShape ( const mir::ops::ConstantOp * op)

Definition at line 48 of file ONNXHelpers.cpp.

49{
50 const auto &t = op->getValue();
51 mir::DataType d_type = t.getElementType();
52
53 if (t.getType().isQuantized())
54 throw std::runtime_error("unsupported data type of shape operator");
55
56 switch (d_type)
57 {
58 case mir::DataType::FLOAT32:
59 return constantToShapeT<float>(t);
60 break;
61 case mir::DataType::FLOAT64:
62 return constantToShapeT<double>(t);
63 break;
64 case mir::DataType::INT32:
65 return constantToShapeT<int32_t>(t);
66 break;
67 case mir::DataType::INT64:
68 return constantToShapeT<int64_t>(t);
69 break;
70 case mir::DataType::UINT8:
71 return constantToShapeT<uint8_t>(t);
72 break;
73 default:
74 throw std::runtime_error{"Unknown datatype in constant"};
75 break;
76 }
77}
DataType getElementType() const
const TensorVariant & getValue() const
Definition ConstantOp.h:36
DataType
Definition DataType.h:27

References mir::TensorVariant::getElementType(), and mir::ops::ConstantOp::getValue().

Referenced by convertExpandV8().

◆ convertAbsV1()

void mir_onnx::convertAbsV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 37 of file Abs.cpp.

38{
39 convertAbsGeneric(onnx_node, context);
40}

◆ convertAbsV6()

void mir_onnx::convertAbsV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 42 of file Abs.cpp.

43{
44 convertAbsGeneric(onnx_node, context);
45}

◆ convertAddV1()

void mir_onnx::convertAddV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 27 of file Add.cpp.

28{
29 // consumed_inputs attribute not used
30 convertAddV6(onnx_node, context);
31}
void convertAddV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Add.cpp:33

References convertAddV6().

◆ convertAddV6()

void mir_onnx::convertAddV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 33 of file Add.cpp.

34{
35 // broadcast attribute not used
36 const auto *axis = findAttribute(onnx_node, "axis");
37 if (axis != nullptr)
38 throw std::runtime_error("Not supported axis attribute in Add operation!");
39
40 convertAddV7(onnx_node, context);
41}
void convertAddV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Add.cpp:43
const onnx::AttributeProto * findAttribute(const onnx::NodeProto &node, const std::string &name)

References convertAddV7(), and findAttribute().

Referenced by convertAddV1().

◆ convertAddV7()

void mir_onnx::convertAddV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 43 of file Add.cpp.

44{
45 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
46 mir::Graph *graph = context->getGraph();
47
48 auto result = createOp<mir::ops::AddOp>(graph, inputs[0], inputs[1])->getOutput(0);
49
50 context->setNodeOutputs(onnx_node, {result});
51}
void setNodeOutputs(const onnx::NodeProto &onnx_node, const std::vector< mir::Operation::Output * > &outputs)
std::vector< mir::Operation::Output * > getNodeInputs(const onnx::NodeProto &onnx_node) const

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

Referenced by convertAddV6().

◆ convertAveragePoolV1()

void mir_onnx::convertAveragePoolV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 28 of file AveragePool.cpp.

29{
30 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
31 mir::Graph *graph = context->getGraph();
32
33 assert(inputs.size() == 1);
34 auto input = inputs[0];
35
36 const auto &input_shape = input->getShape();
37 if (input_shape.rank() != 4)
38 throw std::runtime_error("AveragePool: only 2-D input is supported.");
39
40 constexpr int num_spatial_dims = 2;
41
42 const auto strides =
43 getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
44 if (strides.size() != num_spatial_dims)
45 throw std::runtime_error("AveragePool: attribute 'strides' has incorrect size.");
46
47 const auto kernel_shape = getAttributeValue<std::vector<std::int32_t>>(onnx_node, "kernel_shape");
48 if (kernel_shape.size() != num_spatial_dims)
49 throw std::runtime_error("AveragePool: attribute 'kernel_shape' has incorrect size.");
50
51 std::vector<std::int32_t> padding_before(num_spatial_dims, 0);
52 std::vector<std::int32_t> padding_after(num_spatial_dims, 0);
53 if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
54 {
55 const auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
56 if (pads.size() != num_spatial_dims * 2)
57 throw std::runtime_error("AveragePool: attribute 'pads' has incorrect size.");
58 padding_before.assign(pads.cbegin(), std::next(pads.cbegin(), num_spatial_dims));
59 padding_after.assign(std::next(pads.cbegin(), num_spatial_dims), pads.cend());
60 }
61 else
62 {
63 const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
64 const std::vector<std::int32_t> dilations(num_spatial_dims, 1);
65 inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_shape, padding_before,
66 padding_after);
67 }
68
70 attributes.window = kernel_shape;
71 attributes.strides = strides;
72 attributes.padding_before = padding_before;
73 attributes.padding_after = padding_after;
74 attributes.include_pad = false;
76 auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0);
77
78 context->setNodeOutputs(onnx_node, {result});
79}
result
Definition infer.py:103
T getAttributeValue(const onnx::AttributeProto &attribute)=delete
std::vector< std::int32_t > window
Definition Attributes.h:42
std::vector< std::int32_t > padding_before
Definition Attributes.h:44
std::vector< std::int32_t > padding_after
Definition Attributes.h:45
std::vector< std::int32_t > strides
Definition Attributes.h:43

References mir::AvgPool2DOpAttributes::data_format, findAttribute(), getAttributeValue(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::AvgPool2DOpAttributes::include_pad, inferAutoPadding(), mir::NCHW, mir::AvgPool2DOpAttributes::padding_after, mir::AvgPool2DOpAttributes::padding_before, mir_onnx::ConverterContext::setNodeOutputs(), mir::AvgPool2DOpAttributes::strides, and mir::AvgPool2DOpAttributes::window.

Referenced by convertAveragePoolV7().

◆ convertAveragePoolV10()

void mir_onnx::convertAveragePoolV10 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 90 of file AveragePool.cpp.

91{
92 const auto ceil_mode = getAttributeValue<int64_t>(onnx_node, "ceil_mode", 0);
93 if (ceil_mode != 0)
94 throw std::runtime_error("Not supported ceil_mode attribute!");
95
96 convertAveragePoolV7(onnx_node, context);
97}
void convertAveragePoolV7(const onnx::NodeProto &onnx_node, ConverterContext *context)

References convertAveragePoolV7().

◆ convertAveragePoolV7()

void mir_onnx::convertAveragePoolV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 81 of file AveragePool.cpp.

82{
83 const auto count_include_pad = getAttributeValue<int64_t>(onnx_node, "count_include_pad", 0);
84 if (count_include_pad != 0)
85 throw std::runtime_error("Not supported count_include_pad attribute!");
86
87 convertAveragePoolV1(onnx_node, context);
88}
void convertAveragePoolV1(const onnx::NodeProto &onnx_node, ConverterContext *context)

References convertAveragePoolV1().

Referenced by convertAveragePoolV10().

◆ convertBatchNormalizationV1()

void mir_onnx::convertBatchNormalizationV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 35 of file BatchNormalization.cpp.

36{
37 // consumed_inputs attribute not used
38 convertBatchNormalizationV6(onnx_node, context);
39}
void convertBatchNormalizationV6(const onnx::NodeProto &onnx_node, ConverterContext *context)

References convertBatchNormalizationV6().

◆ convertBatchNormalizationV6()

void mir_onnx::convertBatchNormalizationV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 41 of file BatchNormalization.cpp.

42{
43 const auto is_test = getAttributeValue<std::int64_t>(onnx_node, "is_test", 0);
44 if (is_test == 0)
45 throw std::runtime_error("Not supported is_test attribute!");
46
47 convertBatchNormalizationV7(onnx_node, context);
48}
void convertBatchNormalizationV7(const onnx::NodeProto &onnx_node, ConverterContext *context)

References convertBatchNormalizationV7().

Referenced by convertBatchNormalizationV1().

◆ convertBatchNormalizationV7()

void mir_onnx::convertBatchNormalizationV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 50 of file BatchNormalization.cpp.

51{
52 // spatial attribute used only for learning
53
54 convertBatchNormalizationV9(onnx_node, context);
55}
void convertBatchNormalizationV9(const onnx::NodeProto &onnx_node, ConverterContext *context)

References convertBatchNormalizationV9().

Referenced by convertBatchNormalizationV6().

◆ convertBatchNormalizationV9()

void mir_onnx::convertBatchNormalizationV9 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 57 of file BatchNormalization.cpp.

58{
 59 // momentum attribute used only for learning
60
61 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
62 mir::Graph *graph = context->getGraph();
63
64 assert(inputs.size() == 5);
65 auto input = inputs[0];
66 auto scale = inputs[1];
67 auto bias = inputs[2];
68 auto mean = inputs[3];
69 auto var = inputs[4];
70
71 // 1e-05f is the default epsilon.
72 const auto epsilon = getAttributeValue<float>(onnx_node, "epsilon", 1e-05f);
73
74 // Y = (X - mean) * scale / sqrt(var + epsilon) + bias =
75 // = (X + C1) * C2 + bias
76 // We need these to be constants since we are going to change them.
77 // TODO Implement the formula using ops and let the optimizer constant-fold them.
78 auto scale_op = dynamic_cast<mir::ops::ConstantOp *>(scale->getNode());
79 auto mean_op = dynamic_cast<mir::ops::ConstantOp *>(mean->getNode());
80 auto var_op = dynamic_cast<mir::ops::ConstantOp *>(var->getNode());
81
82 if (scale_op == nullptr || mean_op == nullptr || var_op == nullptr)
83 throw std::runtime_error(
84 "BatchNormalization: only constant 'scale', 'mean' and 'variance' inputs are supported.");
85
86 mir::Tensor<float> scale_accessor(scale_op->getValue());
87 mir::Tensor<float> mean_accessor(mean_op->getValue());
88 mir::Tensor<float> var_accessor(var_op->getValue());
89
90 // C1 = -mean
91 for (const auto &idx : mir::ShapeRange(mean_accessor.getShape()))
92 mean_accessor.at(idx) *= -1;
93
94 // C2 = scale / sqrt(var + epsilon)
95 for (const auto &idx : mir::ShapeRange(scale_accessor.getShape()))
96 scale_accessor.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
97
98 assert(mean_accessor.getShape().rank() == 1);
99 auto input_rank = input->getShape().rank();
100 if (input_rank < 2)
101 throw std::runtime_error("Inputs with shape rank < 2 are not supported for batchnorm");
102
103 mir::Shape new_shape(std::vector<std::int32_t>(input_rank, 1));
104
105 new_shape.dim(1) = mean_accessor.getShape().dim(0); // set channel dim
106
107 auto reshaped_mean = createOp<mir::ops::ReshapeOp>(graph, mean, new_shape)->getOutput(0);
108 auto reshaped_scale = createOp<mir::ops::ReshapeOp>(graph, scale, new_shape)->getOutput(0);
109 auto reshaped_bias = createOp<mir::ops::ReshapeOp>(graph, bias, new_shape)->getOutput(0);
110
111 // Y = (X + C1) * C2 + bias
112 auto result = createOp<mir::ops::AddOp>(graph, input, reshaped_mean)->getOutput(0);
113 result = createOp<mir::ops::MulOp>(graph, result, reshaped_scale)->getOutput(0);
114 result = createOp<mir::ops::AddOp>(graph, result, reshaped_bias)->getOutput(0);
115
116 context->setNodeOutputs(onnx_node, {result});
117}

References mir::Tensor< T >::at(), mir::Shape::dim(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::Tensor< T >::getShape(), mir::Shape::rank(), and mir_onnx::ConverterContext::setNodeOutputs().

Referenced by convertBatchNormalizationV7().

◆ convertConcatV1()

void mir_onnx::convertConcatV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 27 of file Concat.cpp.

28{
29 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
30 mir::Graph *graph = context->getGraph();
31
32 const auto axis = getAttributeValue<int64_t>(onnx_node, "axis", 1);
33
34 auto result = createOp<mir::ops::ConcatOp>(graph, inputs, axis)->getOutput(0);
35
36 context->setNodeOutputs(onnx_node, {result});
37}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertConcatV4()

void mir_onnx::convertConcatV4 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 39 of file Concat.cpp.

40{
41 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
42 mir::Graph *graph = context->getGraph();
43 // From version 4 axis attribute is required
44 auto attr = findAttribute(onnx_node, "axis");
45 if (!attr)
46 throw std::runtime_error("Attribute axis is required!");
47 int32_t axis = attr->i();
48
49 auto result = createOp<mir::ops::ConcatOp>(graph, inputs, axis)->getOutput(0);
50
51 context->setNodeOutputs(onnx_node, {result});
52}

References findAttribute(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertConstantV1()

void mir_onnx::convertConstantV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 28 of file Constant.cpp.

29{
30 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
31 mir::Graph *graph = context->getGraph();
32
33 const auto onnx_tensor = getAttributeValue<onnx::TensorProto>(onnx_node, "value");
34 auto mir_tensor = createTensor(&onnx_tensor);
35
36 auto result = graph->create<mir::ops::ConstantOp>(mir_tensor)->getOutput(0);
37
38 context->setNodeOutputs(onnx_node, {result});
39}
mir::TensorVariant createTensor(const onnx::TensorProto *tensor)

References createTensor(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

Referenced by convertConstantV9().

◆ convertConstantV11()

void mir_onnx::convertConstantV11 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 47 of file Constant.cpp.

48{
49 const auto *value_attr = findAttribute(onnx_node, "value");
50 const auto *sparse_value_attr = findAttribute(onnx_node, "sparse_value");
51 if (value_attr == nullptr && sparse_value_attr == nullptr)
52 throw std::runtime_error("Not enough attributes in Constant operation!");
53
54 if (value_attr != nullptr)
55 return convertConstantV9(onnx_node, context);
56
57 if (sparse_value_attr != nullptr)
58 throw std::runtime_error("Not supported sparse_tensor in Constant operation!");
59}
void convertConstantV9(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Constant.cpp:41

References convertConstantV9(), and findAttribute().

◆ convertConstantV9()

void mir_onnx::convertConstantV9 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 41 of file Constant.cpp.

42{
43 // Since version 9 Constant operation support other types contained in tensor
44 convertConstantV1(onnx_node, context);
45}
void convertConstantV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Constant.cpp:28

References convertConstantV1().

Referenced by convertConstantV11().

◆ convertConvTransposeV1()

void mir_onnx::convertConvTransposeV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext * context
)

Definition at line 32 of file ConvTranspose.cpp.

33{
34 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
35 mir::Graph *graph = context->getGraph();
36
37 assert(inputs.size() >= 2);
38 auto input = inputs[0];
39 auto kernel = inputs[1];
40
41 const auto group = getAttributeValue<std::int64_t>(onnx_node, "group", 1);
42 if (group != 1)
43 throw std::runtime_error("ConvTranspose: attribute 'group' has unsupported value.");
44
45 const auto &input_shape = input->getShape();
46 if (input_shape.rank() != 4)
47 throw std::runtime_error("ConvTranspose: only 2-D input is supported.");
48
49 constexpr int num_spatial_dims = 2;
50
51 const auto dilations =
52 getAttributeValue(onnx_node, "dilations", std::vector<std::int32_t>(num_spatial_dims, 1));
53 if (dilations.size() != num_spatial_dims)
54 throw std::runtime_error("ConvTranspose: attribute 'dilations' has incorrect size.");
55 if (!std::all_of(dilations.cbegin(), dilations.cend(), [](std::int32_t x) { return x == 1; }))
56 throw std::runtime_error("ConvTranspose: attribute 'dilations' has unsupported value.");
57
58 const auto strides =
59 getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
60 if (strides.size() != num_spatial_dims)
61 throw std::runtime_error("ConvTranspose: attribute 'strides' has incorrect size.");
62
63 const auto output_padding =
64 getAttributeValue(onnx_node, "output_padding", std::vector<std::int32_t>(num_spatial_dims, 0));
65 if (output_padding.size() != num_spatial_dims)
66 throw std::runtime_error("ConvTranspose: attribute 'output_padding' has incorrect size.");
67 if (!std::all_of(output_padding.cbegin(), output_padding.cend(),
68 [](std::int32_t x) { return x == 0; }))
69 throw std::runtime_error("ConvTranspose: attribute 'output_padding' has unsupported value.");
70
71 // Assuming kernel has IOHW format.
72 assert(kernel->getShape().rank() == 4);
73 const auto kernel_size = getAttributeValue(
74 onnx_node, "kernel_shape",
75 std::vector<std::int32_t>{kernel->getShape().dim(2), kernel->getShape().dim(3)});
76 if (kernel_size.size() != num_spatial_dims)
77 throw std::runtime_error("ConvTranspose: attribute 'kernel_shape' has incorrect size.");
78
79 // ONNX IOHW -> MIR HWOI
80 std::vector<std::size_t> perm{2, 3, 1, 0}; // OIHW -> OHWI
81 kernel = createOp<mir::ops::TransposeOp>(graph, kernel, perm)->getOutput(0);
82
84 if (const auto *output_shape_attr = findAttribute(onnx_node, "output_shape"))
85 {
86 const auto output_size = getAttributeValue<std::vector<std::int32_t>>(*output_shape_attr);
87 if (output_size.size() != num_spatial_dims)
88 throw std::runtime_error("ConvTranspose: attribute 'output_shape' has incorrect size.");
89 const mir::Shape output_shape{input_shape.dim(0), kernel->getShape().dim(2), output_size[0],
90 output_size[1]};
92 attributes.strides = strides;
95 result =
96 createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes, output_shape)->getOutput(0);
97 }
98 else
99 {
100 // TODO This code was not tested.
101 throw std::runtime_error(
102 "ConvTranspose: absence of attribute 'output_shape' is not supported.");
103 std::vector<std::int32_t> padding_before(num_spatial_dims, 0);
104 std::vector<std::int32_t> padding_after(num_spatial_dims, 0);
105 if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
106 {
107 const auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
108 if (pads.size() != num_spatial_dims * 2)
109 throw std::runtime_error("ConvTranspose: attribute 'pads' has incorrect size.");
110 padding_before.assign(pads.cbegin(), std::next(pads.cbegin(), num_spatial_dims));
111 padding_after.assign(std::next(pads.cbegin(), num_spatial_dims), pads.cend());
112 }
113 else
114 {
115 const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
116 inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_size, padding_before,
117 padding_after);
118 }
119 mir::Deconv2DOpAttributes attributes;
120 attributes.strides = strides;
121 attributes.padding_before = padding_before;
122 attributes.padding_after = padding_after;
124 result = createOp<mir::ops::DeConv2DOp>(graph, input, kernel, attributes)->getOutput(0);
125 }
126
127 if (inputs.size() > 2)
128 {
129 auto bias = inputs[2];
130 bias = createOp<mir::ops::ReshapeOp>(graph, bias, mir::Shape{1, bias->getShape().dim(0), 1, 1})
131 ->getOutput(0);
132 result = createOp<mir::ops::AddOp>(graph, result, bias)->getOutput(0);
133 }
134
135 context->setNodeOutputs(onnx_node, {result});
136}
Represents an output of a node.
Definition Operation.h:60
const luci_interpreter::RuntimeShape output_shape
void inferAutoPadding(const std::string &pad_type, const mir::Shape &input_shape, const std::vector< std::int32_t > &dilations, const std::vector< std::int32_t > &strides, const std::vector< std::int32_t > &window_size, std::vector< std::int32_t > &padding_before, std::vector< std::int32_t > &padding_after)
ops::PaddingType padding_type
Definition Attributes.h:69
std::vector< std::int32_t > padding_after
Definition Attributes.h:67
std::vector< std::int32_t > strides
Definition Attributes.h:65
std::vector< std::int32_t > padding_before
Definition Attributes.h:66

References mir::Deconv2DOpAttributes::data_format, findAttribute(), getAttributeValue(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), inferAutoPadding(), mir::NCHW, output_shape, mir::Deconv2DOpAttributes::padding_after, mir::Deconv2DOpAttributes::padding_before, mir::Deconv2DOpAttributes::padding_type, mir::ops::SameUpper, mir_onnx::ConverterContext::setNodeOutputs(), and mir::Deconv2DOpAttributes::strides.

◆ convertConvV1()

void mir_onnx::convertConvV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 30 of file Conv.cpp.

31{
32 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
33 mir::Graph *graph = context->getGraph();
34
35 assert(inputs.size() >= 2);
36 auto input = inputs[0];
37 auto kernel = inputs[1];
38
39 auto input_shape = input->getShape();
40 bool conv1d = false;
41 if (input_shape.rank() == 3)
42 {
43 input_shape = {input_shape.dim(0), input_shape.dim(1), input_shape.dim(2), 1};
44 auto reshaped_input = createOp<mir::ops::ReshapeOp>(graph, input, input_shape);
45 input = reshaped_input->getOutput(0);
46 conv1d = true;
47 }
48 else
49 {
50 if (input_shape.rank() != 4)
51 throw std::runtime_error{"Conv is unsupported for tensors with more than 4 dimentions"};
52 }
53
54 constexpr int num_spatial_dims = 2;
55
56 std::vector<int32_t> dilations(num_spatial_dims, 1);
57 if (const auto *dilations_attr = findAttribute(onnx_node, "dilations"))
58 {
59 dilations = getAttributeValue<std::vector<int32_t>>(*dilations_attr);
60 if (conv1d)
61 dilations.emplace_back(1);
62 }
63
64 if (dilations.size() != num_spatial_dims)
65 throw std::runtime_error("Conv: attribute 'dilations' has incorrect size.");
66 if (!std::all_of(dilations.cbegin(), dilations.cend(), [](std::int32_t x) { return x == 1; }))
67 throw std::runtime_error("Conv: attribute 'dilations' has unsupported value.");
68
69 std::vector<int32_t> strides(num_spatial_dims, 1);
70 if (const auto *strides_attr = findAttribute(onnx_node, "strides"))
71 {
72 strides = getAttributeValue<std::vector<int32_t>>(*strides_attr);
73 if (conv1d)
74 strides.emplace_back(1);
75 }
76
77 if (strides.size() != num_spatial_dims)
78 throw std::runtime_error("Conv: attribute 'strides' has incorrect size.");
79
80 // Assuming kernel has OIHW format.
81 if (conv1d)
82 {
83 auto kernel_shape = kernel->getShape();
84 assert(kernel_shape.rank() == 3);
85 kernel_shape = {kernel_shape.dim(0), kernel_shape.dim(1), kernel_shape.dim(2), 1};
86 auto reshaped_kernel = createOp<mir::ops::ReshapeOp>(graph, kernel, kernel_shape);
87 kernel = reshaped_kernel->getOutput(0);
88 }
89
90 std::vector<std::int32_t> kernel_shape{kernel->getShape().dim(2), kernel->getShape().dim(3)};
91 if (const auto *k_shape_attr = findAttribute(onnx_node, "kernel_shape"))
92 {
93 kernel_shape = getAttributeValue<std::vector<std::int32_t>>(*k_shape_attr);
94 if (conv1d)
95 kernel_shape.emplace_back(1);
96 }
97
98 if (kernel_shape.size() != num_spatial_dims)
99 throw std::runtime_error("Conv: attribute 'kernel_shape' has incorrect size.");
100
101 std::vector<std::int32_t> padding_before(num_spatial_dims, 0);
102 std::vector<std::int32_t> padding_after(num_spatial_dims, 0);
103 if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
104 {
105 auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
106 if (conv1d)
107 {
108 pads.emplace_back(0);
109 pads.emplace_back(0);
110 }
111
112 if (pads.size() != num_spatial_dims * 2)
113 throw std::runtime_error("Conv: attribute 'pads' has incorrect size.");
114 const auto fixed_pads = fixPads(input_shape, pads, strides, dilations, kernel_shape);
115 padding_before.assign(fixed_pads.cbegin(), std::next(fixed_pads.cbegin(), num_spatial_dims));
116 padding_after.assign(std::next(fixed_pads.cbegin(), num_spatial_dims), fixed_pads.cend());
117 }
118 else
119 {
120 const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
121 inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_shape, padding_before,
122 padding_after);
123 }
124
125 const auto group = getAttributeValue<std::int64_t>(onnx_node, "group", 1);
126
127 mir::Conv2DOpAttributes attributes;
128 attributes.strides = strides;
129 attributes.padding_before = padding_before;
130 attributes.padding_after = padding_after;
131 attributes.num_groups = group;
133
134 std::vector<std::size_t> perm{0, 2, 3, 1}; // OIHW -> OHWI
135 kernel = createOp<mir::ops::TransposeOp>(graph, kernel, perm)->getOutput(0);
136 auto result = createOp<mir::ops::Conv2DOp>(graph, input, kernel, attributes)->getOutput(0);
137
138 if (inputs.size() > 2)
139 {
140 auto bias = inputs[2];
141 bias = createOp<mir::ops::ReshapeOp>(graph, bias, mir::Shape{1, bias->getShape().dim(0), 1, 1})
142 ->getOutput(0);
143 result = createOp<mir::ops::AddOp>(graph, result, bias)->getOutput(0);
144 }
145
146 if (conv1d)
147 {
148 auto output_shape = result->getShape();
149 output_shape.resize(output_shape.rank() - 1);
150 result = createOp<mir::ops::ReshapeOp>(graph, result, output_shape)->getOutput(0);
151 }
152
153 context->setNodeOutputs(onnx_node, {result});
154}
void resize(int dimensions_count)
Definition Tensor.h:121
std::vector< std::int32_t > fixPads(const mir::Shape &input_shape, const std::vector< std::int32_t > &pads, const std::vector< std::int32_t > &strides, const std::vector< std::int32_t > &dilation, const std::vector< std::int32_t > &kernel_shape)
std::vector< std::int32_t > padding_after
Definition Attributes.h:33
std::vector< std::int32_t > strides
Definition Attributes.h:31
std::int32_t num_groups
Definition Attributes.h:34
std::vector< std::int32_t > padding_before
Definition Attributes.h:32

References mir::Conv2DOpAttributes::data_format, findAttribute(), fixPads(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), inferAutoPadding(), mir::NCHW, mir::Conv2DOpAttributes::num_groups, output_shape, mir::Conv2DOpAttributes::padding_after, mir::Conv2DOpAttributes::padding_before, luci_interpreter::RuntimeShape::resize(), mir_onnx::ConverterContext::setNodeOutputs(), and mir::Conv2DOpAttributes::strides.

◆ convertDivV7()

void mir_onnx::convertDivV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 27 of file Div.cpp.

28{
29 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
30
31 mir::Graph *graph = context->getGraph();
32
33 auto result = createOp<mir::ops::DivOp>(graph, inputs[0], inputs[1])->getOutput(0);
34
35 context->setNodeOutputs(onnx_node, {result});
36}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertDropoutV1()

void mir_onnx::convertDropoutV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 24 of file Dropout.cpp.

25{
26 // consumed_inputs attribute not used
27 convertDropoutV6(onnx_node, context);
28}
void convertDropoutV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Dropout.cpp:30

References convertDropoutV6().

◆ convertDropoutV10()

void mir_onnx::convertDropoutV10 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 44 of file Dropout.cpp.

45{
46 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
47
48 // ratio attribute not used
49
50 // This is a no-op in inference mode.
51 context->setNodeOutputs(onnx_node, {inputs[0]});
52}

References mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

Referenced by convertDropoutV6(), and convertDropoutV7().

◆ convertDropoutV6()

void mir_onnx::convertDropoutV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 30 of file Dropout.cpp.

31{
32 const auto is_test = getAttributeValue<std::int64_t>(onnx_node, "is_test", 0);
33 if (is_test == 0)
34 throw std::runtime_error("Not supported is_test attribute!");
35
36 convertDropoutV10(onnx_node, context);
37}
void convertDropoutV10(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Dropout.cpp:44

References convertDropoutV10().

Referenced by convertDropoutV1().

◆ convertDropoutV7()

void mir_onnx::convertDropoutV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 39 of file Dropout.cpp.

40{
41 convertDropoutV10(onnx_node, context);
42}

References convertDropoutV10().

◆ convertEqualV11()

void mir_onnx::convertEqualV11 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 27 of file Equal.cpp.

28{
29 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
30 mir::Graph *graph = context->getGraph();
31
32 auto result = createOp<mir::ops::EqualOp>(graph, inputs[0], inputs[1])->getOutput(0);
33
34 context->setNodeOutputs(onnx_node, {result});
35}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

Referenced by convertEqualV7().

◆ convertEqualV7()

void mir_onnx::convertEqualV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 37 of file Equal.cpp.

38{
39 // Other type constraints
40 convertEqualV11(onnx_node, context);
41}
void convertEqualV11(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Equal.cpp:27

References convertEqualV11().

◆ convertExpandV8()

void mir_onnx::convertExpandV8 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 26 of file Expand.cpp.

27{
28 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
29 mir::Graph *graph = context->getGraph();
30
31 if (inputs[1]->getNode()->getType() != mir::Operation::Type::constant)
32 {
33 throw std::runtime_error{"Expand with non-constant input shape is not supported"};
34 }
35
36 auto target_shape = constantToShape(static_cast<mir::ops::ConstantOp *>(inputs[1]->getNode()));
37
38 auto *result = createOp<mir::ops::BroadcastOp>(graph, inputs[0], target_shape)->getOutput(0);
39
40 context->setNodeOutputs(onnx_node, {result});
41}
mir::Shape constantToShape(const mir::ops::ConstantOp *op)
NNFW_TYPE getType(const char *type="")

References constantToShape(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), getType(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertFlattenV1()

void mir_onnx::convertFlattenV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 27 of file Flatten.cpp.

28{
29 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
30 mir::Graph *graph = context->getGraph();
31
32 const auto axis = getAttributeValue<int64_t>(onnx_node, "axis", 1);
33 assert(inputs.size() == 1);
34 const auto &in_shape = inputs[0]->getShape();
35 assert(axis <= in_shape.rank()); // A tensor of rank >= axis
36 int32_t first_dim = 1, second_dim = 1;
37 int32_t dim = 0;
38
39 for (; dim < axis; dim++)
40 first_dim *= in_shape.dim(dim);
41
42 for (; dim < in_shape.rank(); dim++)
43 second_dim *= in_shape.dim(dim);
44
45 mir::Shape out_shape({first_dim, second_dim}); // Output 2D tensor
46
47 auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape)->getOutput(0);
48
49 context->setNodeOutputs(onnx_node, {result});
50}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

Referenced by convertFlattenV9().

◆ convertFlattenV9()

void mir_onnx::convertFlattenV9 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 52 of file Flatten.cpp.

53{
54 // Other type constraints
55 convertFlattenV1(onnx_node, context);
56}
void convertFlattenV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Flatten.cpp:27

References convertFlattenV1().

◆ convertGatherV1()

void mir_onnx::convertGatherV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 27 of file Gather.cpp.

28{
29 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
30 mir::Graph *graph = context->getGraph();
31
32 // 0 is the default axis number.
33 const auto axis = getAttributeValue<std::int64_t>(onnx_node, "axis", 0);
34
35 auto result = createOp<mir::ops::GatherOp>(graph, inputs[0], inputs[1], axis)->getOutput(0);
36
37 context->setNodeOutputs(onnx_node, {result});
38}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertGemmV1()

void mir_onnx::convertGemmV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 91 of file Gemm.cpp.

92{
93 return convertGemm(onnx_node, context);
94}

◆ convertGemmV11()

void mir_onnx::convertGemmV11 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 114 of file Gemm.cpp.

115{
116 // This operation differs from V9: input C is optional
117 return convertGemm(onnx_node, context);
118}

◆ convertGemmV6()

void mir_onnx::convertGemmV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 96 of file Gemm.cpp.

97{
98 // This version differs from V1: in description of C input (redundant text "can be inplace.")
99 return convertGemm(onnx_node, context);
100}

◆ convertGemmV7()

void mir_onnx::convertGemmV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 102 of file Gemm.cpp.

103{
104 // This version differs from V6: removed "broadcast" attribute
105 return convertGemm(onnx_node, context);
106}

◆ convertGemmV9()

void mir_onnx::convertGemmV9 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 108 of file Gemm.cpp.

109{
110 // This version differs from V7: added more supported types
111 return convertGemm(onnx_node, context);
112}

◆ convertGlobalAveragePoolV2()

void mir_onnx::convertGlobalAveragePoolV2 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 26 of file GlobalAveragePool.cpp.

27{
28 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
29 mir::Graph *graph = context->getGraph();
30
31 assert(inputs.size() == 1);
32 auto input = inputs[0];
33
34 const auto &input_shape = input->getShape();
35 if (input_shape.rank() != 4)
36 throw std::runtime_error("GlobalAveragePool: only 2-D input is supported.");
37
38 // GlobalAveragePool is equivalent to AveragePool with kernel size equal
39 // to the spatial dimension of input tensor.
40 const std::vector<std::int32_t> window_size{input->getShape().dim(2), input->getShape().dim(3)};
42 attributes.window = window_size;
44
45 auto result = createOp<mir::ops::AvgPool2DOp>(graph, input, attributes)->getOutput(0);
46
47 context->setNodeOutputs(onnx_node, {result});
48}

References mir::AvgPool2DOpAttributes::data_format, mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::NCHW, mir_onnx::ConverterContext::setNodeOutputs(), and mir::AvgPool2DOpAttributes::window.

◆ convertGreaterV7()

void mir_onnx::convertGreaterV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 37 of file Greater.cpp.

38{
39 convertGreaterVGeneric(onnx_node, context);
40}

◆ convertGreaterV9()

void mir_onnx::convertGreaterV9 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 42 of file Greater.cpp.

43{
44 convertGreaterVGeneric(onnx_node, context);
45}

◆ convertIdentityV1()

void mir_onnx::convertIdentityV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 22 of file Identity.cpp.

23{
24 const auto inputs = context->getNodeInputs(onnx_node);
25 assert(inputs.size() == 1);
26
27 context->setNodeOutputs(onnx_node, {inputs[0]});
28}

References mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertLessV7()

void mir_onnx::convertLessV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 37 of file Less.cpp.

38{
39 convertLessGeneric(onnx_node, context);
40}

◆ convertLessV9()

void mir_onnx::convertLessV9 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 42 of file Less.cpp.

43{
44 convertLessGeneric(onnx_node, context);
45}

◆ convertMatMulV1()

void mir_onnx::convertMatMulV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 26 of file MatMul.cpp.

27{
28 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
29 mir::Graph *graph = context->getGraph();
30
31 assert(inputs.size() == 2);
32 auto A = inputs[0];
33 auto B = inputs[1];
34 // MatMul multiplies N-dimensional matrices
35 // FullyConnected layer multiplies only 2-dimensional matrices
36 if (A->getShape().rank() != 2 || B->getShape().rank() != 2)
37 throw std::runtime_error("Supported only 2D matrix multiplying!");
38 // Calculate A * B.
39 auto result = createOp<mir::ops::FullyConnectedOp>(graph, A, B)->getOutput(0);
40
41 context->setNodeOutputs(onnx_node, {result});
42}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

Referenced by convertMatMulV9().

◆ convertMatMulV9()

void mir_onnx::convertMatMulV9 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 44 of file MatMul.cpp.

45{
46 // Other type constraints
47 convertMatMulV1(onnx_node, context);
48}
void convertMatMulV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition MatMul.cpp:26

References convertMatMulV1().

◆ convertMaxPoolV1()

void mir_onnx::convertMaxPoolV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 28 of file MaxPool.cpp.

29{
30 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
31 mir::Graph *graph = context->getGraph();
32
33 assert(inputs.size() == 1);
34 auto input = inputs[0];
35
36 const auto &input_shape = input->getShape();
37 if (input_shape.rank() != 4)
38 throw std::runtime_error("MaxPool: only 2-D input is supported.");
39
40 constexpr int num_spatial_dims = 2;
41
42 const auto strides =
43 getAttributeValue(onnx_node, "strides", std::vector<std::int32_t>(num_spatial_dims, 1));
44 if (strides.size() != num_spatial_dims)
45 throw std::runtime_error("MaxPool: attribute 'strides' has incorrect size.");
46
47 const auto kernel_shape = getAttributeValue<std::vector<std::int32_t>>(onnx_node, "kernel_shape");
48 if (kernel_shape.size() != num_spatial_dims)
49 throw std::runtime_error("MaxPool: attribute 'kernel_shape' has incorrect size.");
50
51 std::vector<std::int32_t> padding_before;
52 std::vector<std::int32_t> padding_after;
53 if (const auto *pads_attr = findAttribute(onnx_node, "pads"))
54 {
55 const auto pads = getAttributeValue<std::vector<std::int32_t>>(*pads_attr);
56 if (pads.size() != num_spatial_dims * 2)
57 throw std::runtime_error("MaxPool: attribute 'pads' has incorrect size.");
58 padding_before.assign(pads.cbegin(), std::next(pads.cbegin(), num_spatial_dims));
59 padding_after.assign(std::next(pads.cbegin(), num_spatial_dims), pads.cend());
60 }
61 else
62 {
63 const auto auto_pad = getAttributeValue<std::string>(onnx_node, "auto_pad", "NOTSET");
64 const std::vector<std::int32_t> dilations(num_spatial_dims, 1);
65 inferAutoPadding(auto_pad, input_shape, dilations, strides, kernel_shape, padding_before,
66 padding_after);
67 }
68
70 attributes.window = kernel_shape;
71 attributes.strides = strides;
72 attributes.padding_before = padding_before;
73 attributes.padding_after = padding_after;
75 auto result = createOp<mir::ops::MaxPool2DOp>(graph, input, attributes)->getOutput(0);
76
77 context->setNodeOutputs(onnx_node, {result});
78}
std::vector< std::int32_t > window
Definition Attributes.h:54
std::vector< std::int32_t > padding_after
Definition Attributes.h:57
std::vector< std::int32_t > strides
Definition Attributes.h:55
std::vector< std::int32_t > padding_before
Definition Attributes.h:56

References mir::MaxPool2DOpAttributes::data_format, findAttribute(), getAttributeValue(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), inferAutoPadding(), mir::NCHW, mir::MaxPool2DOpAttributes::padding_after, mir::MaxPool2DOpAttributes::padding_before, mir_onnx::ConverterContext::setNodeOutputs(), mir::MaxPool2DOpAttributes::strides, and mir::MaxPool2DOpAttributes::window.

Referenced by convertMaxPoolV8().

◆ convertMaxPoolV10()

void mir_onnx::convertMaxPoolV10 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 89 of file MaxPool.cpp.

90{
91 const auto ceil_mode = getAttributeValue<int64_t>(onnx_node, "ceil_mode", 0);
92 if (ceil_mode != 0)
93 throw std::runtime_error("Not supported ceil_mode attribute!");
94
95 const auto *dilations = findAttribute(onnx_node, "dilations");
96 if (dilations != nullptr)
97 {
98 // check default (=1) dilations on each spatial axis
99 for (auto index = 0; index < dilations->ints_size(); index++)
100 if (dilations->ints(index) != 1)
101 throw std::runtime_error("Not supported dilations in MaxPool operation!");
102 }
103
104 convertMaxPoolV8(onnx_node, context);
105}

References convertMaxPoolV8(), and findAttribute().

◆ convertMaxPoolV8()

void mir_onnx::convertMaxPoolV8 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 80 of file MaxPool.cpp.

81{
82 const auto storage_order = getAttributeValue<int64_t>(onnx_node, "storage_order", 0);
83 if (storage_order != 0)
84 throw std::runtime_error("Not supported storage order attribute!");
85
86 convertMaxPoolV1(onnx_node, context);
87}
void convertMaxPoolV1(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition MaxPool.cpp:28

References convertMaxPoolV1().

Referenced by convertMaxPoolV10().

◆ convertMaxV1()

void mir_onnx::convertMaxV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 39 of file Max.cpp.

40{
41 convertMaxGeneric(onnx_node, context);
42}

◆ convertMaxV6()

void mir_onnx::convertMaxV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 44 of file Max.cpp.

45{
46 convertMaxGeneric(onnx_node, context);
47}

◆ convertMaxV8()

void mir_onnx::convertMaxV8 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 49 of file Max.cpp.

50{
51 convertMaxGeneric(onnx_node, context);
52}

◆ convertMulV7()

void mir_onnx::convertMulV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 26 of file Mul.cpp.

27{
28 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
29 mir::Graph *graph = context->getGraph();
30 auto result = createOp<mir::ops::MulOp>(graph, inputs[0], inputs[1])->getOutput(0);
31
32 context->setNodeOutputs(onnx_node, {result});
33}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertPadAttrName()

void mir_onnx::convertPadAttrName ( const std::string &  pad_attr_name,
const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 27 of file Pad.cpp.

29{
30 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
31 mir::Graph *graph = context->getGraph();
32
33 assert(inputs.size() == 1);
34 auto input = inputs[0];
35
36 // 0.0f is the default value to be filled into padded cells.
37 const auto value = getAttributeValue<float>(onnx_node, "value", 0.0f);
38 const auto pads = getAttributeValue<std::vector<std::int64_t>>(onnx_node, pad_attr_name);
39 // "constant" is the default mode.
40 const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "constant");
41 if (mode != "constant")
42 throw std::runtime_error("Not supported Pad mode attribute!");
43
44 const int num_dims = input->getShape().rank();
45 assert(static_cast<int>(pads.size()) == num_dims * 2);
46 mir::PadOpAttributes attributes(num_dims);
47 for (int i = 0; i < num_dims; i++)
48 {
49 attributes.padding_before[i] = pads[i];
50 attributes.padding_after[i] = pads[num_dims + i];
51 }
52
53 attributes.padding_value = value;
54
55 auto result = createOp<mir::ops::PadOp>(graph, input, attributes)->getOutput(0);
56
57 context->setNodeOutputs(onnx_node, {result});
58}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::PadOpAttributes::padding_after, mir::PadOpAttributes::padding_before, mir::PadOpAttributes::padding_value, and mir_onnx::ConverterContext::setNodeOutputs().

Referenced by convertPadV1(), and convertPadV2().

◆ convertPadV1()

void mir_onnx::convertPadV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 60 of file Pad.cpp.

61{
62 convertPadAttrName("paddings", onnx_node, context);
63}
void convertPadAttrName(const std::string &pad_attr_name, const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Pad.cpp:27

References convertPadAttrName().

◆ convertPadV2()

void mir_onnx::convertPadV2 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 65 of file Pad.cpp.

66{
67 convertPadAttrName("pads", onnx_node, context);
68}

References convertPadAttrName().

◆ convertReciprocalV1()

void mir_onnx::convertReciprocalV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 43 of file Reciprocal.cpp.

44{
45 convertReciprocal(onnx_node, context);
46}

◆ convertReciprocalV6()

void mir_onnx::convertReciprocalV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 48 of file Reciprocal.cpp.

49{
50 convertReciprocal(onnx_node, context);
51}

◆ convertReduceMeanV1()

void mir_onnx::convertReduceMeanV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 29 of file ReduceMean.cpp.

30{
31 const auto inputs = context->getNodeInputs(onnx_node);
32 assert(inputs.size() == 1);
33
34 const auto axes = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "axes");
35 const auto keepdims = getAttributeValue<int64_t>(onnx_node, "keepdims", 1);
36
37 std::vector<int32_t> reduce_dims;
38 if (axes.empty())
39 { // reduce over all dimensions
40 reduce_dims.resize(inputs[0]->getShape().rank());
41 std::iota(reduce_dims.begin(), reduce_dims.end(), 0);
42 }
43 else
44 {
45 auto rank = inputs[0]->getShape().rank();
46
47 std::transform(axes.begin(), axes.end(), std::back_inserter(reduce_dims),
48 [rank](int64_t axis) { return axis < 0 ? axis + rank : axis; });
49 }
50 // Keep the reduced dimension or not; the default of 1 means to keep the reduced dimension.
51 bool keep_dims = static_cast<bool>(keepdims);
52
53 mir::Graph *graph = context->getGraph();
54 auto result =
55 createOp<mir::ops::ReduceMeanOp>(graph, inputs[0], reduce_dims, keep_dims)->getOutput(0);
56
57 context->setNodeOutputs(onnx_node, {result});
58}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertReluV1()

void mir_onnx::convertReluV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 36 of file Relu.cpp.

37{
38 convertRelu(onnx_node, context);
39}

◆ convertReluV6()

void mir_onnx::convertReluV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 41 of file Relu.cpp.

42{
43 convertRelu(onnx_node, context);
44}

◆ convertReshapeV1()

void mir_onnx::convertReshapeV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 31 of file Reshape.cpp.

32{
33 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
34 mir::Graph *graph = context->getGraph();
35 // consumed_inputs attribute not used
36 const auto *shape_attr = findAttribute(onnx_node, "shape");
37 if (shape_attr && shape_attr->ints_size() > 0)
38 {
39 mir::Shape in_shape = inputs[0]->getShape();
40 mir::Shape out_shape(shape_attr->ints_size());
41 for (int32_t index = 0; index < out_shape.rank(); index++)
42 {
43 const auto dim_value = shape_attr->ints(index);
44 if (dim_value == 0)
45 out_shape.dim(index) = in_shape.dim(index);
46 else
47 out_shape.dim(index) = dim_value;
48 }
49
50 auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape)->getOutput(0);
51
52 context->setNodeOutputs(onnx_node, {result});
53 }
54 else // dimension value is unchanged
55 {
56 context->setNodeOutputs(onnx_node, {inputs[0]});
57 }
58}
int32_t & dim(int32_t axis) noexcept
Definition Shape.h:47

References mir::Shape::dim(), findAttribute(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::Shape::rank(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertReshapeV5()

void mir_onnx::convertReshapeV5 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 60 of file Reshape.cpp.

61{
62 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
63 mir::Graph *graph = context->getGraph();
64 // The original shape
65 const auto &in_shape = inputs[0]->getShape();
66
67 // Input tensor describing the new shape
68 auto *op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
69 assert(op && "We support only constant shape input");
70 auto shape_tensor = op->getValue();
71 mir::Shape shape_tensor_shape = (shape_tensor).getShape();
72 assert(shape_tensor_shape.rank() == 1);
73 // The rank of the new shape
74 auto cnt = shape_tensor_shape.numElements();
75 // The vector to build the new shape from
76 std::vector<int32_t> shape_vector(cnt);
77 mir::ShapeRange out_range(shape_tensor_shape);
78 mir::Tensor<int64_t> tensor_accessor(shape_tensor);
79
80 int i = 0;
81 for (auto idx : out_range)
82 {
83 if (tensor_accessor.at(idx) == 0)
84 shape_vector[i] = in_shape.dim(i);
85 else if (tensor_accessor.at(idx) == -1)
86 shape_vector[i] = mir::Shape::autoDim;
87 else
88 shape_vector[i] = tensor_accessor.at(idx);
89 i++;
90 }
91 auto out_shape = mir::Shape(shape_vector);
92 auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape)->getOutput(0);
93
94 context->setNodeOutputs(onnx_node, {result});
95}
int32_t numElements() const
Definition Shape.cpp:30
int32_t rank() const
Definition Shape.h:43

References mir::Tensor< T >::at(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::Shape::numElements(), mir::Shape::rank(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertShapeV1()

void mir_onnx::convertShapeV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 28 of file Shape.cpp.

29{
30 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
31 mir::Graph *graph = context->getGraph();
32 const auto &input_shape = inputs[0]->getShape();
33 int size = input_shape.rank();
35 std::vector<int64_t> data(static_cast<std::size_t>(size));
36 for (int i = 0; i < size; i++)
37 {
38 data[i] = input_shape.dim(i);
39 }
40 mir::TensorVariant tensor({mir::DataType::INT64, output_shape}, data.data());
41 auto result = createOp<mir::ops::ConstantOp>(graph, tensor)->getOutput(0);
42
43 context->setNodeOutputs(onnx_node, {result});
44}
int32_t size[5]
Definition Slice.cpp:35

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), output_shape, mir_onnx::ConverterContext::setNodeOutputs(), and size.

◆ convertSigmoidV1()

void mir_onnx::convertSigmoidV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 36 of file Sigmoid.cpp.

37{
38 convertSigmoid(onnx_node, context);
39}

◆ convertSigmoidV6()

void mir_onnx::convertSigmoidV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 41 of file Sigmoid.cpp.

42{
43 convertSigmoid(onnx_node, context);
44}

◆ convertSoftmaxV1()

void mir_onnx::convertSoftmaxV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 27 of file Softmax.cpp.

28{
29 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
30 mir::Graph *graph = context->getGraph();
31
32 // 1 is the default axis number.
33 const auto axis = getAttributeValue<std::int64_t>(onnx_node, "axis", 1);
34
35 auto result = createOp<mir::ops::SoftmaxOp>(graph, inputs[0], axis)->getOutput(0);
36
37 context->setNodeOutputs(onnx_node, {result});
38}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertSqrtV1()

void mir_onnx::convertSqrtV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 36 of file Sqrt.cpp.

37{
38 convertSqrt(onnx_node, context);
39}

◆ convertSqrtV6()

void mir_onnx::convertSqrtV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 41 of file Sqrt.cpp.

42{
43 convertSqrt(onnx_node, context);
44}

◆ convertSubV1()

void mir_onnx::convertSubV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 27 of file Sub.cpp.

28{
29 // consumed_inputs attribute not used
30 convertSubV6(onnx_node, context);
31}
void convertSubV6(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Sub.cpp:33

References convertSubV6().

◆ convertSubV6()

void mir_onnx::convertSubV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 33 of file Sub.cpp.

34{
35 // broadcast attribute not used
36 const auto *axis = findAttribute(onnx_node, "axis");
37 if (axis != nullptr)
38 throw std::runtime_error("Not supported axis attribute in Sub operation!");
39
40 convertSubV7(onnx_node, context);
41}
void convertSubV7(const onnx::NodeProto &onnx_node, ConverterContext *context)
Definition Sub.cpp:43

References convertSubV7(), and findAttribute().

Referenced by convertSubV1().

◆ convertSubV7()

void mir_onnx::convertSubV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 43 of file Sub.cpp.

44{
45 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
46 mir::Graph *graph = context->getGraph();
47
48 auto result = createOp<mir::ops::SubOp>(graph, inputs[0], inputs[1])->getOutput(0);
49
50 context->setNodeOutputs(onnx_node, {result});
51}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

Referenced by convertSubV6().

◆ convertSumV8()

void mir_onnx::convertSumV8 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 26 of file Sum.cpp.

27{
28 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
29 mir::Graph *graph = context->getGraph();
30 assert(inputs.size() >= 1);
31
32 auto result = inputs[0];
33 for (int i = 1; i < static_cast<int>(inputs.size()); ++i)
34 {
35 result = createOp<mir::ops::AddOp>(graph, result, inputs[i])->getOutput(0);
36 }
37
38 context->setNodeOutputs(onnx_node, {result});
39}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertTanhV1()

void mir_onnx::convertTanhV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 36 of file Tanh.cpp.

37{
38 convertTanh(onnx_node, context);
39}

◆ convertTanhV6()

void mir_onnx::convertTanhV6 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 41 of file Tanh.cpp.

42{
43 convertTanh(onnx_node, context);
44}

◆ convertTransposeV1()

void mir_onnx::convertTransposeV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 28 of file Transpose.cpp.

29{
30 const auto inputs = context->getNodeInputs(onnx_node);
31 mir::Graph *graph = context->getGraph();
32
33 assert(inputs.size() == 1);
34 auto input = inputs[0];
35
36 const int num_axes = input->getShape().rank();
37 std::vector<std::size_t> axis_order(num_axes);
38 const auto *perm_attr = findAttribute(onnx_node, "perm");
39
40 if (perm_attr == nullptr)
41 {
42 // Reverse the dimensions.
43 std::iota(axis_order.rbegin(), axis_order.rend(), 0);
44 }
45 else
46 {
47 const auto perm = getAttributeValue<std::vector<std::int64_t>>(*perm_attr);
48 assert(static_cast<int>(perm.size()) == num_axes);
49 std::copy(perm.cbegin(), perm.cend(), axis_order.begin());
50 }
51
52 auto result = createOp<mir::ops::TransposeOp>(graph, input, axis_order)->getOutput(0);
53
54 context->setNodeOutputs(onnx_node, {result});
55}

References findAttribute(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertUnsqueezeV1()

void mir_onnx::convertUnsqueezeV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 27 of file Unsqueeze.cpp.

28{
29 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
30 mir::Graph *graph = context->getGraph();
31 const auto axes = getAttributeValue<std::vector<std::int64_t>>(onnx_node, "axes");
32 assert(!axes.empty());
33 const mir::Shape &input_shape = inputs[0]->getShape();
34 const int out_rank = input_shape.rank() + static_cast<int>(axes.size());
35 mir::Shape out_shape(out_rank);
36 auto ints_iterator = axes.cbegin();
37 int j = 0;
38 for (int i = 0; i < out_rank; i++)
39 {
40 if (ints_iterator < axes.cend() && i == *ints_iterator)
41 {
42 out_shape.dim(i) = 1;
43 ints_iterator++;
44 }
45 else
46 {
47 out_shape.dim(i) = input_shape.dim(j);
48 j++;
49 }
50 }
51 auto result = createOp<mir::ops::ReshapeOp>(graph, inputs[0], out_shape)->getOutput(0);
52
53 context->setNodeOutputs(onnx_node, {result});
54}

References mir::Shape::dim(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::Shape::rank(), and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertUpsampleV1()

void mir_onnx::convertUpsampleV1 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 32 of file Upsample.cpp.

33{
34 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
35 mir::Graph *graph = context->getGraph();
36
37 // "nearest" is the default mode.
38 std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
39 assert(mode == "nearest" && "Unsupported upscale mode!");
40
41 const float h_scale = getAttributeValue<float>(onnx_node, "height_scale", 0.0f); // required
42 const float w_scale = getAttributeValue<float>(onnx_node, "width_scale", 0.0f); // required
43 if (h_scale < 1.0f || w_scale < 1.0f)
44 throw std::runtime_error("Wrong scale attributes!");
45
46 assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
47 std::vector<float> scales_vector(4);
48 // NCHW
49 scales_vector.at(0) = 1.0f;
50 scales_vector.at(1) = 1.0f;
51 scales_vector.at(2) = h_scale;
52 scales_vector.at(3) = w_scale;
53
54 auto result =
55 createOp<mir::ops::ResizeOp>(graph, inputs[0],
56 mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
57 ->getOutput(0);
58
59 context->setNodeOutputs(onnx_node, {result});
60}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::ops::ResizeOp::nearestNeighbor, and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertUpsampleV7()

void mir_onnx::convertUpsampleV7 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 62 of file Upsample.cpp.

63{
64 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
65 mir::Graph *graph = context->getGraph();
66
67 // "nearest" is the default mode.
68 std::string mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
69 assert(mode == "nearest" && "Unsupported upscale mode!");
70
71 const auto *scales_attr = findAttribute(onnx_node, "scales");
72 if (!scales_attr)
73 throw std::runtime_error("Not enough required scales attribute!");
74
75 if (scales_attr->floats_size() != inputs[0]->getShape().rank())
76 throw std::runtime_error(
77 "Number of elements of scales should be the same as the rank of input");
78
79 assert(inputs[0]->getShape().rank() == 4 && "Only rank 4 is supported");
80 std::vector<float> scales_vector(4);
81 // NCHW
82 scales_vector.at(0) = scales_attr->floats(0);
83 scales_vector.at(1) = scales_attr->floats(1);
84 scales_vector.at(2) = scales_attr->floats(2);
85 scales_vector.at(3) = scales_attr->floats(3);
86
87 auto result =
88 createOp<mir::ops::ResizeOp>(graph, inputs[0],
89 mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
90 ->getOutput(0);
91
92 context->setNodeOutputs(onnx_node, {result});
93}

References findAttribute(), mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::ops::ResizeOp::nearestNeighbor, and mir_onnx::ConverterContext::setNodeOutputs().

◆ convertUpsampleV9()

void mir_onnx::convertUpsampleV9 ( const onnx::NodeProto &  onnx_node,
ConverterContext context 
)

Definition at line 95 of file Upsample.cpp.

96{
97 std::vector<mir::Operation::Output *> inputs = context->getNodeInputs(onnx_node);
98 mir::Graph *graph = context->getGraph();
99
100 // "nearest" is the default mode.
101 const auto mode = getAttributeValue<std::string>(onnx_node, "mode", "nearest");
102 if (mode != "nearest")
103 throw std::runtime_error("Upsample: only 'nearest' mode is supported.");
104
105 // relies on attributes being lifted to constants (ONNX optimization pass)
106 assert(inputs.size() > 1);
107 auto *scales = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
108 assert(scales && "Weights could be a constant tensor only");
109 auto scales_tensor = mir::Tensor<float>(scales->getValue());
110 int rank = inputs[0]->getShape().rank();
111 if (rank != 4)
112 throw std::runtime_error("Upsample: only 4-D input is supported.");
113 assert(scales_tensor.getShape().numElements() == rank &&
114 "The number of elements of 'scales' should be the same as the rank of input 'X'");
115 std::vector<float> scales_vector(rank);
116 for (int i = 0; i < scales_tensor.getShape().numElements(); i++)
117 scales_vector[i] = scales_tensor.atOffset(i);
118
119 auto result =
120 createOp<mir::ops::ResizeOp>(graph, inputs[0],
121 mir::ops::ResizeOp::ResizeMethod::nearestNeighbor, scales_vector)
122 ->getOutput(0);
123
124 context->setNodeOutputs(onnx_node, {result});
125}

References mir_onnx::ConverterContext::getGraph(), mir_onnx::ConverterContext::getNodeInputs(), mir::ops::ResizeOp::nearestNeighbor, and mir_onnx::ConverterContext::setNodeOutputs().

◆ createOp()

template<typename OpType , typename... Types>
mir::Operation * mir_onnx::createOp ( mir::Graph graph,
Types &&...  args 
)

Definition at line 41 of file ONNXHelpers.h.

42{
43 auto op = graph->create<OpType>(std::forward<Types>(args)...);
44 op = foldConstants(graph, op);
45 return op;
46}
mir::Operation * foldConstants(mir::Graph *graph, mir::Operation *op)

References foldConstants().

◆ createTensor()

mir::TensorVariant mir_onnx::createTensor ( const onnx::TensorProto *  tensor)

Definition at line 106 of file ONNXHelpers.cpp.

107{
108 mir::DataType type;
109 const void *src_data;
110 mir::Shape shape(tensor->dims_size());
111 for (int i = 0; i < tensor->dims_size(); ++i)
112 {
113 shape.dim(i) = tensor->dims(i);
114 }
115
116 if (tensor->float_data_size() != 0)
117 {
118 assert(tensor->data_type() == onnx::TensorProto::FLOAT);
119 type = mir::DataType::FLOAT32;
120 src_data = tensor->float_data().data();
121 }
122 else if (tensor->double_data_size() != 0)
123 {
124 assert(tensor->data_type() == onnx::TensorProto::DOUBLE);
125 type = mir::DataType::FLOAT64;
126 src_data = tensor->double_data().data();
127 }
128 else if (tensor->int32_data_size() != 0)
129 {
130 assert(tensor->data_type() == onnx::TensorProto::INT32);
131 type = mir::DataType::INT32;
132 src_data = tensor->int32_data().data();
133 }
134 else if (tensor->int64_data_size() != 0)
135 {
136 assert(tensor->data_type() == onnx::TensorProto::INT64);
137 type = mir::DataType::INT64;
138 src_data = tensor->int64_data().data();
139 }
140 else if (tensor->has_raw_data())
141 {
142 type = onnxDataTypeToMirDataType((onnx::TensorProto_DataType)tensor->data_type());
143 src_data = tensor->raw_data().data();
144 }
145 else
146 {
147 throw std::runtime_error("Invalid data in Proto file, investigate");
148 }
149
150 return mir::TensorVariant({type, shape}, src_data);
151}
type
Definition infer.py:18
mir::DataType onnxDataTypeToMirDataType(onnx::TensorProto::DataType type)

References mir::Shape::dim(), and onnxDataTypeToMirDataType().

Referenced by convertConstantV1().

◆ findAttribute()

const onnx::AttributeProto * mir_onnx::findAttribute ( const onnx::NodeProto &  node,
const std::string &  name 
)
inline

Definition at line 74 of file AttributeHelpers.h.

76{
77 const auto &attributes = node.attribute();
78 const auto it = std::find_if(
79 attributes.cbegin(), attributes.cend(),
80 [&name](const onnx::AttributeProto &attribute) { return attribute.name() == name; });
81 if (it == attributes.cend())
82 return nullptr;
83 return &*it;
84}

Referenced by convertAddV6(), convertAveragePoolV1(), convertConcatV4(), convertConstantV11(), convertConvTransposeV1(), convertConvV1(), convertMaxPoolV1(), convertMaxPoolV10(), convertReshapeV1(), convertSubV6(), convertTransposeV1(), convertUpsampleV7(), getAttributeValue(), and getAttributeValue().

◆ fixPads()

std::vector< std::int32_t > mir_onnx::fixPads ( const mir::Shape input_shape,
const std::vector< std::int32_t > &  pads,
const std::vector< std::int32_t > &  strides,
const std::vector< std::int32_t > &  dilation,
const std::vector< std::int32_t > &  kernel_shape 
)

Definition at line 74 of file ConvPoolHelpers.cpp.

79{
80 assert(pads.size() % 2 == 0);
81 int spatial_dimensions = pads.size() / 2;
82 std::vector<std::int32_t> fixed_pads(pads);
83 for (int i = 0; i < spatial_dimensions; ++i)
84 {
85 auto effective_window_dim = (kernel_shape[i] - 1) * dilation[i] + 1;
86 auto effective_input_dim = input_shape.dim(i + 2) + pads[i] + pads[i + spatial_dimensions];
87 // Computing number of "redundant" elements at the end of input dimension
88 // for example we have effective_input_dim == 8, effective_window_dim == 3 and stride == 2:
89 // [1][2][3][4][5][6][7][8] - input
90 // * * * . . . . - first kernel application
91 // . . * * * . . - second kernel application
92 // . . . . * * * - third kernel application
93 // element 8 is unused (remainder should be 1)
94 //
95 // glossary:
96 // i - effective input size
97 // w - effective window size
98 // s - stride
99 // n - number of kernel applications (3 in example)
100 //
101 // i = s * (n-1) + w + r
102 // r = i - w - s * (n-1)
103 // n - is the maximum number of windows we can fit into input, so this formula is equal to
104 // r = (i - w) % s
105 auto remainder = (effective_input_dim - effective_window_dim) % strides[i];
106
107 // remove redundant pad, but no more than there are padding
108 fixed_pads[i + spatial_dimensions] -= std::min(remainder, pads[i + spatial_dimensions]);
109 }
110 return fixed_pads;
111}

References mir::Shape::dim().

Referenced by convertConvV1().

◆ foldConstants()

mir::Operation * mir_onnx::foldConstants ( mir::Graph graph,
mir::Operation op 
)

Definition at line 153 of file ONNXHelpers.cpp.

154{
155 if (op->getType() == mir::Operation::Type::constant ||
156 op->getType() == mir::Operation::Type::input || op->getType() == mir::Operation::Type::output)
157 {
158 // don't fold input, output and constant nodes
159 return op;
160 }
161
162 if (op->getNumOutputs() != 1)
163 {
164 // this operation either have more than 1 output or none at all
165 return op;
166 }
167
168 bool is_foldable =
169 std::all_of(op->getInputs().begin(), op->getInputs().end(), [](mir::Operation::Output *out) {
170 return out->getNode()->getType() == mir::Operation::Type::constant;
171 });
172
173 if (!is_foldable)
174 return op;
175
176 mir_interpreter::MIRInterpreter interpreter;
177 for (mir::Operation::Output *out : op->getInputs())
178 {
179 auto *constant = static_cast<mir::ops::ConstantOp *>(out->getNode());
180 interpreter.setTensor(out, constant->getValue());
181 }
182 op->accept(&interpreter);
183 const mir::TensorVariant &output = interpreter.getTensor(op->getOutput(0));
184
185 return graph->create<mir::ops::ConstantOp>(output);
186}
Type getType() const
Definition Operation.h:123
void accept(IVisitor *v)
Definition Operation.cpp:56
Output * getOutput(std::size_t index)
Definition Operation.h:149
std::deque< Output * > & getInputs()
Definition Operation.h:131
std::size_t getNumOutputs() const
Definition Operation.h:129

References mir::Operation::accept(), mir::Operation::getInputs(), mir::Operation::Output::getNode(), mir::Operation::getNumOutputs(), mir::Operation::getOutput(), and mir::Operation::getType().

Referenced by createOp().

◆ getAttributeValue() [1/9]

template<>
float mir_onnx::getAttributeValue ( const onnx::AttributeProto &  attribute)
inline

Definition at line 35 of file AttributeHelpers.h.

36{
37 assert(attribute.type() == onnx::AttributeProto::FLOAT);
38 return attribute.f();
39}

◆ getAttributeValue() [2/9]

template<>
std::int64_t mir_onnx::getAttributeValue ( const onnx::AttributeProto &  attribute)
inline

Definition at line 41 of file AttributeHelpers.h.

42{
43 assert(attribute.type() == onnx::AttributeProto::INT);
44 return attribute.i();
45}

◆ getAttributeValue() [3/9]

template<>
std::string mir_onnx::getAttributeValue ( const onnx::AttributeProto &  attribute)
inline

Definition at line 47 of file AttributeHelpers.h.

48{
49 assert(attribute.type() == onnx::AttributeProto::STRING);
50 return attribute.s();
51}

◆ getAttributeValue() [4/9]

template<>
onnx::TensorProto mir_onnx::getAttributeValue ( const onnx::AttributeProto &  attribute)
inline

Definition at line 53 of file AttributeHelpers.h.

54{
55 assert(attribute.type() == onnx::AttributeProto::TENSOR);
56 return attribute.t();
57}

◆ getAttributeValue() [5/9]

template<>
std::vector< std::int32_t > mir_onnx::getAttributeValue ( const onnx::AttributeProto &  attribute)
inline

Definition at line 60 of file AttributeHelpers.h.

61{
62 assert(attribute.type() == onnx::AttributeProto::INTS);
63 // TODO Check that values fit.
64 return {attribute.ints().cbegin(), attribute.ints().cend()};
65}

◆ getAttributeValue() [6/9]

template<>
std::vector< std::int64_t > mir_onnx::getAttributeValue ( const onnx::AttributeProto &  attribute)
inline

Definition at line 68 of file AttributeHelpers.h.

69{
70 assert(attribute.type() == onnx::AttributeProto::INTS);
71 return {attribute.ints().cbegin(), attribute.ints().cend()};
72}

◆ getAttributeValue() [7/9]

template<typename T >
T mir_onnx::getAttributeValue ( const onnx::AttributeProto &  attribute)
delete

◆ getAttributeValue() [8/9]

template<typename T >
T mir_onnx::getAttributeValue ( const onnx::NodeProto &  node,
const std::string &  name 
)

Definition at line 86 of file AttributeHelpers.h.

87{
88 const auto *attribute = findAttribute(node, name);
89 if (attribute == nullptr)
90 throw std::runtime_error("Cannot find attribute '" + name + "' in node '" + node.name() + "'.");
91 return getAttributeValue<T>(*attribute);
92}

References findAttribute().

◆ getAttributeValue() [9/9]

template<typename T >
T mir_onnx::getAttributeValue ( const onnx::NodeProto &  node,
const std::string &  name,
default_value 
)

Definition at line 95 of file AttributeHelpers.h.

96{
97 const auto *attribute = findAttribute(node, name);
98 if (attribute == nullptr)
99 return default_value;
100 return getAttributeValue<T>(*attribute);
101}

References findAttribute().

◆ importModelFromBinaryFile()

std::unique_ptr< mir::Graph > mir_onnx::importModelFromBinaryFile ( const std::string &  filename)

Definition at line 223 of file ONNXImporterImpl.cpp.

224{
225 ONNXImporterImpl importer;
226 return importer.importModelFromBinaryFile(filename);
227}

Referenced by loadModel(), and main().

◆ importModelFromTextFile()

std::unique_ptr< mir::Graph > mir_onnx::importModelFromTextFile ( const std::string &  filename)

Definition at line 229 of file ONNXImporterImpl.cpp.

230{
231 ONNXImporterImpl importer;
232 return importer.importModelFromTextFile(filename);
233}

Referenced by main().

◆ inferAutoPadding()

void mir_onnx::inferAutoPadding ( const std::string &  pad_type,
const mir::Shape input_shape,
const std::vector< std::int32_t > &  dilations,
const std::vector< std::int32_t > &  strides,
const std::vector< std::int32_t > &  window_size,
std::vector< std::int32_t > &  padding_before,
std::vector< std::int32_t > &  padding_after 
)

Definition at line 25 of file ConvPoolHelpers.cpp.

31{
32 constexpr int num_spatial_dims = 2;
33
34 if (pad_type == "NOTSET")
35 {
36 // Do nothing.
37 }
38 else if (pad_type == "VALID")
39 {
40 padding_before.assign(num_spatial_dims, 0);
41 padding_after.assign(num_spatial_dims, 0);
42 }
43 else
44 {
45 padding_before.resize(num_spatial_dims);
46 padding_after.resize(num_spatial_dims);
47
48 assert(dilations.size() == num_spatial_dims);
49 assert(strides.size() == num_spatial_dims);
50 assert(window_size.size() == num_spatial_dims);
51
52 for (int i = 0; i < num_spatial_dims; ++i)
53 {
54 const std::int32_t eff_window_size = (window_size[i] - 1) * dilations[i] + 1;
55 // Assuming input has NCHW format.
56 const std::int32_t residual = input_shape.dim(2 + i) % strides[i];
57 const std::int32_t total_pad = std::max(
58 INT32_C(0), residual == 0 ? eff_window_size - strides[i] : eff_window_size - residual);
59 if (pad_type == "SAME_UPPER")
60 {
61 padding_before[i] = total_pad / 2;
62 padding_after[i] = (total_pad + 1) / 2;
63 }
64 else
65 {
66 assert(pad_type == "SAME_LOWER");
67 padding_before[i] = (total_pad + 1) / 2;
68 padding_after[i] = total_pad / 2;
69 }
70 }
71 }
72}

References mir::Shape::dim().

Referenced by convertAveragePoolV1(), convertConvTransposeV1(), convertConvV1(), and convertMaxPoolV1().

◆ loadModel()

std::unique_ptr< mir::Graph > mir_onnx::loadModel ( const std::string &  filename)

Definition at line 235 of file ONNXImporterImpl.cpp.

236{
237 return importModelFromBinaryFile(filename);
238}

References importModelFromBinaryFile().

Referenced by main().

◆ onnxDataTypeToMirDataType()

mir::DataType mir_onnx::onnxDataTypeToMirDataType ( onnx::TensorProto::DataType  type)

Definition at line 79 of file ONNXHelpers.cpp.

80{
81 switch (type)
82 {
83 case onnx::TensorProto_DataType_UINT8:
84 return mir::DataType::UINT8;
85 break;
86 case onnx::TensorProto_DataType_INT32:
87 return mir::DataType::INT32;
88 break;
89 case onnx::TensorProto_DataType_INT64:
90 return mir::DataType::INT64;
91 break;
92 case onnx::TensorProto_DataType_DOUBLE:
93 return mir::DataType::FLOAT64;
94 break;
95 case onnx::TensorProto_DataType_FLOAT:
96 return mir::DataType::FLOAT32;
97 break;
98 case onnx::TensorProto_DataType_UNDEFINED:
99 throw std::runtime_error{"Undefined input data type not supported"};
100 break;
101 default:
102 throw std::runtime_error{"Unsupported tensor element data type"};
103 }
104}

Referenced by createTensor().

◆ registerSupportedOps()

void mir_onnx::registerSupportedOps ( )
inline

Definition at line 64 of file ONNXOpRegistration.h.

65{
66 auto &registry = NodeConverterRegistry::getInstance();
67
68#define REG_CONVERTER(name, version, function) registry.registerConverter(name, version, function)
69#define REG(name, version) REG_CONVERTER(#name, version, convert##name##V##version)
70#define UNSUPPORTED(name, version) REG_CONVERTER(#name, version, nullptr)
71
72 REG(Abs, 1);
73 REG(Abs, 6);
74 UNSUPPORTED(Abs, firstUnknownOpset);
75
76 REG(Add, 1);
77 REG(Add, 6);
78 REG(Add, 7);
79 UNSUPPORTED(Add, firstUnknownOpset);
80
81 REG(AveragePool, 1);
82 REG(AveragePool, 7);
83 REG(AveragePool, 10);
84 UNSUPPORTED(AveragePool, 11);
85 UNSUPPORTED(AveragePool, firstUnknownOpset);
86
87 REG(BatchNormalization, 1);
88 REG(BatchNormalization, 6);
89 REG(BatchNormalization, 7);
90 REG(BatchNormalization, 9);
91 UNSUPPORTED(BatchNormalization, firstUnknownOpset);
92
93 REG(Concat, 1);
94 REG(Concat, 4);
95 UNSUPPORTED(Concat, 11);
96 UNSUPPORTED(Concat, firstUnknownOpset);
97
98 REG(Constant, 1);
99 REG(Constant, 9);
100 REG(Constant, 11);
101 UNSUPPORTED(Constant, 12);
102 UNSUPPORTED(Constant, firstUnknownOpset);
103
104 REG(Conv, 1);
105 UNSUPPORTED(Conv, 11);
106 UNSUPPORTED(Conv, firstUnknownOpset);
107
108 REG(ConvTranspose, 1);
109 UNSUPPORTED(ConvTranspose, 11);
110 UNSUPPORTED(ConvTranspose, firstUnknownOpset);
111
112 UNSUPPORTED(Div, 1);
113 UNSUPPORTED(Div, 6);
114 REG(Div, 7);
115 UNSUPPORTED(Div, firstUnknownOpset);
116
117 REG(Dropout, 1);
118 REG(Dropout, 6);
119 REG(Dropout, 7);
120 REG(Dropout, 10);
121 UNSUPPORTED(Dropout, 12);
122 UNSUPPORTED(Dropout, firstUnknownOpset);
123
124 UNSUPPORTED(Equal, 1);
125 REG(Equal, 7);
126 REG(Equal, 11);
127 UNSUPPORTED(Equal, firstUnknownOpset);
128
129 REG(Expand, 8);
130 UNSUPPORTED(Expand, firstUnknownOpset);
131
132 REG(Flatten, 1);
133 REG(Flatten, 9);
134 UNSUPPORTED(Flatten, 11);
135 UNSUPPORTED(Flatten, firstUnknownOpset);
136
137 REG(Gather, 1);
138 UNSUPPORTED(Gather, 11);
139 UNSUPPORTED(Gather, firstUnknownOpset);
140
141 REG(Gemm, 1);
142 REG(Gemm, 6);
143 REG(Gemm, 7);
144 REG(Gemm, 9);
145 REG(Gemm, 11);
146 UNSUPPORTED(Gemm, firstUnknownOpset);
147
148 UNSUPPORTED(GlobalAveragePool, 1);
149 REG(GlobalAveragePool, 2);
150 UNSUPPORTED(GlobalAveragePool, firstUnknownOpset);
151
152 UNSUPPORTED(Greater, 1);
153 REG(Greater, 7);
154 REG(Greater, 9);
155 UNSUPPORTED(Greater, firstUnknownOpset);
156
157 REG(Identity, 1);
158 UNSUPPORTED(Identity, firstUnknownOpset);
159
160 UNSUPPORTED(Less, 1);
161 REG(Less, 7);
162 REG(Less, 9);
163 UNSUPPORTED(Less, firstUnknownOpset);
164
165 REG(MatMul, 1);
166 REG(MatMul, 9);
167 UNSUPPORTED(MatMul, firstUnknownOpset);
168
169 REG(Max, 1);
170 REG(Max, 6);
171 REG(Max, 8);
172 UNSUPPORTED(Max, firstUnknownOpset);
173
174 REG(MaxPool, 1);
175 REG(MaxPool, 8);
176 REG(MaxPool, 10);
177 UNSUPPORTED(MaxPool, 11);
178 UNSUPPORTED(MaxPool, 12);
179 UNSUPPORTED(MaxPool, firstUnknownOpset);
180
181 UNSUPPORTED(Mul, 1);
182 UNSUPPORTED(Mul, 6);
183 REG(Mul, 7);
184 UNSUPPORTED(Mul, firstUnknownOpset);
185
186 REG(Pad, 1);
187 REG(Pad, 2);
188 UNSUPPORTED(Pad, 11);
189 UNSUPPORTED(Pad, firstUnknownOpset);
190
191 REG(Reciprocal, 1);
192 REG(Reciprocal, 6);
193 UNSUPPORTED(Reciprocal, firstUnknownOpset);
194
195 REG(ReduceMean, 1);
196 UNSUPPORTED(ReduceMean, 11);
197 UNSUPPORTED(ReduceMean, firstUnknownOpset);
198
199 REG(Relu, 1);
200 REG(Relu, 6);
201 UNSUPPORTED(Relu, firstUnknownOpset);
202
203 REG(Reshape, 1);
204 REG(Reshape, 5);
205 UNSUPPORTED(Reshape, firstUnknownOpset);
206
207 REG(Shape, 1);
208 UNSUPPORTED(Shape, firstUnknownOpset);
209
210 REG(Sigmoid, 1);
211 REG(Sigmoid, 6);
212 UNSUPPORTED(Sigmoid, firstUnknownOpset);
213
214 REG(Softmax, 1);
215 // TODO SoftmaxV11 is mostly the same, needs a check though
216 UNSUPPORTED(Softmax, firstUnknownOpset);
217
218 REG(Sqrt, 1);
219 REG(Sqrt, 6);
220 UNSUPPORTED(Sqrt, firstUnknownOpset);
221
222 REG(Sub, 1);
223 REG(Sub, 6);
224 REG(Sub, 7);
225 UNSUPPORTED(Sub, firstUnknownOpset);
226
227 UNSUPPORTED(Sum, 1);
228 UNSUPPORTED(Sum, 6);
229 REG(Sum, 8);
230 UNSUPPORTED(Sum, firstUnknownOpset);
231
232 REG(Tanh, 1);
233 REG(Tanh, 6);
234 UNSUPPORTED(Tanh, firstUnknownOpset);
235
236 REG(Transpose, 1);
237 UNSUPPORTED(Transpose, firstUnknownOpset);
238
239 REG(Unsqueeze, 1);
240 UNSUPPORTED(Unsqueeze, 11);
241 UNSUPPORTED(Unsqueeze, firstUnknownOpset);
242
243 // Upsample-1 is not mentioned in onnx master and was considered experimental at the time
244 REG(Upsample, 1);
245 REG(Upsample, 7);
246 REG(Upsample, 9);
247 UNSUPPORTED(Upsample, firstUnknownOpset);
248
249#undef REG
250#undef REG_CONVERTER
251#undef UNSUPPORTED
252}
void Add(const float *input1_data, const Dims< 4 > &input1_dims, const float *input2_data, const Dims< 4 > &input2_dims, float *output_data, const Dims< 4 > &output_dims)
Definition Add.float.cpp:28
void AveragePool(const float *input_data, const Dims< 4 > &input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float *output_data, const Dims< 4 > &output_dims)
void Conv(const float *input_data, const Dims< 4 > &input_dims, const float *filter_data, const Dims< 4 > &filter_dims, const float *bias_data, const Dims< 4 > &bias_dims, int stride_width, int stride_height, int pad_width, int pad_height, float *output_data, const Dims< 4 > &output_dims, float *im2col_data, const Dims< 4 > &im2col_dims)
void Div(const float *input1_data, const Dims< 4 > &input1_dims, const float *input2_data, const Dims< 4 > &input2_dims, float *output_data, const Dims< 4 > &output_dims)
Definition Div.float.cpp:28
void MaxPool(const float *input_data, const Dims< 4 > &input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float *output_data, const Dims< 4 > &output_dims)
void Mul(const float *input1_data, const Dims< 4 > &input1_dims, const float *input2_data, const Dims< 4 > &input2_dims, float *output_data, const Dims< 4 > &output_dims)
Definition Mul.float.cpp:28
#define UNSUPPORTED(name, version)
#define REG(name, version)
void Softmax(const float *input_data, const Dims< 4 > &input_dims, float beta, float *output_data, const Dims< 4 > &output_dims)
void Sub(const float *input1_data, const Dims< 4 > &input1_dims, const float *input2_data, const Dims< 4 > &input2_dims, float *output_data, const Dims< 4 > &output_dims)
Definition Sub.float.cpp:28
void Gemm(const Eigen::MatrixBase< Lhs > &lhs, const Eigen::MatrixBase< Rhs > &rhs, Eigen::MatrixBase< Result > *result)
Definition GEMM.h:24
Definition Shape.h:28

References Add(), AveragePool(), Conv(), Div(), firstUnknownOpset, Gemm(), mir_onnx::NodeConverterRegistry::getInstance(), Identity, Max, MaxPool(), Mul(), REG, Relu, Softmax(), Sub(), and UNSUPPORTED.

Variable Documentation

◆ firstUnknownOpset

const int64_t mir_onnx::firstUnknownOpset = 13

Definition at line 31 of file ONNXHelpers.cpp.

Referenced by registerSupportedOps().