static mir::Shape convertBlobShape(const caffe::BlobShape &shape)
{
  mir::Shape mir_shape(shape.dim_size());

  for (int i = 0; i < shape.dim_size(); ++i)
    mir_shape.dim(i) = shape.dim(i);

  return mir_shape;
}
std::vector<mir::Operation::Output *> CaffeOpCreator::createSplit(mir::Operation::Output *arg,
                                                                  int32_t num_parts, int32_t axis)
{
  const auto &arg_shape = arg->getShape();

  assert(axis >= 0 && axis < arg_shape.rank());
  int32_t part_size = arg_shape.dim(axis) / num_parts;
  assert(part_size * num_parts == arg_shape.dim(axis));

  Shape starts(arg_shape.rank());
  Shape sizes(arg_shape);
  sizes.dim(axis) = part_size;

  std::vector<mir::Operation::Output *> outputs(num_parts);
  for (int32_t i = 0; i < num_parts; ++i)
  {
    outputs[i] = createOp<ops::SliceOp>(arg, starts, sizes)->getOutput(0);
    starts.dim(axis) += part_size;
  }

  return outputs;
}
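// createSplit example (hypothetical values, assuming 'starts' is zero-initialized): splitting a
// {6, 8} tensor into 3 parts along axis 0 emits three SliceOps with starts {0, 0}, {2, 0},
// {4, 0} and size {2, 8} each.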
mir::Operation::Output *CaffeOpCreator::createFullyConnected(mir::Operation::Output *input,
                                                             mir::Operation::Output *weights,
                                                             int32_t axis)
{
  const auto &input_shape = input->getShape();
  const auto &weights_shape = weights->getShape();

  assert(axis >= 0 && axis < input_shape.rank());
  assert(weights_shape.rank() == 2);

  // Result shape is input.shape[0:axis] followed by weights.shape[1].
  Shape result_shape = input_shape;
  result_shape.resize(axis + 1);
  result_shape.dim(axis) = weights_shape.dim(1);

  // Flatten the input to a 2-D shape.
  int32_t outer_size = 1;
  for (int32_t i = 0; i < axis; ++i)
    outer_size *= input_shape.dim(i);
  int32_t inner_size = 1;
  for (int32_t i = axis; i < input_shape.rank(); ++i)
    inner_size *= input_shape.dim(i);

  auto flatten = createOp<ops::ReshapeOp>(input, Shape{outer_size, inner_size})->getOutput(0);
  auto fc = createOp<ops::FullyConnectedOp>(flatten, weights)->getOutput(0);
  return createOp<ops::ReshapeOp>(fc, result_shape)->getOutput(0);
}
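// createFullyConnected worked example (hypothetical values): input shape {2, 3, 4}, axis = 1,
// weights shape {12, 10}. Then outer_size = 2 and inner_size = 3 * 4 = 12, the input is
// flattened to {2, 12}, FullyConnectedOp produces {2, 10}, and the final reshape to
// result_shape = {2, 10} restores the leading dimensions.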
TensorVariant CaffeOpCreator::convertBlob(const caffe::BlobProto &blob)
{
  const void *src_data;
  mir::DataType dtype;

  if (blob.data_size() != 0)
  {
    assert(blob.double_data_size() == 0);
    dtype = mir::DataType::FLOAT32;
    src_data = blob.data().data();
  }
  else if (blob.double_data_size() != 0)
  {
    dtype = mir::DataType::FLOAT64;
    src_data = blob.double_data().data();
  }
  else
  {
    throw std::runtime_error("No data in Caffe BlobProto, investigate");
  }

  const mir::Shape shape = convertBlobShape(blob.shape());
  return TensorVariant({dtype, shape}, src_data);
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertInput(const caffe::LayerParameter &layer)
{
  const auto &params = layer.input_param();
  const auto num_inputs = layer.top_size();
  const auto num_shapes = params.shape_size();
  std::vector<mir::Operation::Output *> outputs;

  assert((num_shapes == 1 || num_shapes == num_inputs) && "Unsupported number of shapes.");

  for (int i = 0; i < num_inputs; ++i)
  {
    // A single shape applies to every input; otherwise each input has its own shape.
    const auto &blob_shape = params.shape(num_shapes == 1 ? 0 : i);
    mir::TensorType input_type(DataType::FLOAT32, convertBlobShape(blob_shape));
    auto input = createOp<ops::InputOp>(input_type)->getOutput(0);
    outputs.push_back(input);
  }

  return outputs;
}
template <class OperationAttributes>
static void convertConvolutionParam(const caffe::ConvolutionParameter &conv_param,
                                    OperationAttributes &attributes)
{
  std::int32_t stride_h, stride_w;
  if (conv_param.has_stride_h() || conv_param.has_stride_w())
  {
    // If stride_h or stride_w are set, they take precedence.
    stride_h = conv_param.stride_h();
    stride_w = conv_param.stride_w();
  }
  else if (conv_param.stride_size() == 0)
  {
    // Default stride is 1.
    stride_h = stride_w = 1;
  }
  else if (conv_param.stride_size() == 1)
  {
    // A single stride applies to both spatial dimensions.
    stride_h = stride_w = conv_param.stride(0);
  }
  else
  {
    // Otherwise, there must be a stride per spatial dimension.
    assert(conv_param.stride_size() == 2);
    stride_h = conv_param.stride(0);
    stride_w = conv_param.stride(1);
  }
  attributes.strides = {stride_h, stride_w};

  std::int32_t pad_h, pad_w;
  if (conv_param.has_pad_h() || conv_param.has_pad_w())
  {
    // If pad_h or pad_w are set, they take precedence.
    pad_h = conv_param.pad_h();
    pad_w = conv_param.pad_w();
  }
  else if (conv_param.pad_size() == 0)
  {
    // Default pad is 0.
    pad_h = pad_w = 0;
  }
  else if (conv_param.pad_size() == 1)
  {
    // A single pad applies to both spatial dimensions.
    pad_h = pad_w = conv_param.pad(0);
  }
  else
  {
    assert(conv_param.pad_size() == 2);
    pad_h = conv_param.pad(0);
    pad_w = conv_param.pad(1);
  }
  attributes.padding_after = attributes.padding_before = {pad_h, pad_w};
}
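// Note on convertConvolutionParam above: caffe.proto declares both legacy scalar fields
// (stride_h/stride_w, pad_h/pad_w) and repeated per-dimension fields (stride, pad); this helper
// normalizes whichever form is present into 2-D stride and padding attributes.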
void CaffeOpCreator::checkConvolution(const caffe::LayerParameter &layer,
                                      std::set<std::string> &problems_ops_set)
{
  const caffe::ConvolutionParameter &params = layer.convolution_param();

  assert(params.stride_size() <= 2);

  if (params.axis() != 1)
    problems_ops_set.insert("Conv2D: Unsupported axis");

  if (params.pad_size() != 0 && (params.has_pad_h() || params.has_pad_w()))
    problems_ops_set.insert("Conv2D: Conflicting padding properties");

  if (params.pad_size() > 2)
    problems_ops_set.insert("Conv2D: Unsupported number of pads");
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertConvolution(const caffe::LayerParameter &layer,
                                   const std::vector<mir::Operation::Output *> &inputs)
{
  const auto &params = layer.convolution_param();
  Conv2DOpAttributes attributes;

  convertConvolutionParam(params, attributes);
  attributes.num_groups = params.group();

  assert(layer.blobs(0).shape().dim_size() == 4);
  auto kernel = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
  std::vector<std::size_t> perm{0, 2, 3, 1};
  kernel = createOp<ops::TransposeOp>(kernel, perm)->getOutput(0);
  auto result = createOp<ops::Conv2DOp>(inputs[0], kernel, attributes)->getOutput(0);

  // Add the bias, if any.
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}
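// Note on convertConvolution above: Caffe stores convolution kernels as
// (num_output, channels / group, kernel_h, kernel_w); the {0, 2, 3, 1} transpose rearranges
// them into the (out, h, w, in) layout this importer feeds to mir::ops::Conv2DOp.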
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertDeconvolution(const caffe::LayerParameter &layer,
                                     const std::vector<mir::Operation::Output *> &inputs)
{
  const caffe::ConvolutionParameter &params = layer.convolution_param();
  Deconv2DOpAttributes attributes;

  convertConvolutionParam(params, attributes);

  if (params.group() != 1)
  {
    throw std::runtime_error("Deconvolution: 'group' != 1 is not supported.");
  }

  auto kernel = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
  std::vector<std::size_t> perm{2, 3, 1, 0};
  kernel = createOp<ops::TransposeOp>(kernel, perm)->getOutput(0);
  auto result = createOp<ops::DeConv2DOp>(inputs[0], kernel, attributes)->getOutput(0);

  // Add the bias, if any.
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertInnerProduct(const caffe::LayerParameter &layer,
                                    const std::vector<mir::Operation::Output *> &inputs)
{
  const auto &params = layer.inner_product_param();
  auto weights = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);

  // Unless 'transpose' is set, Caffe stores the weights as (output, input); transpose them
  // to the (input, output) layout expected by createFullyConnected.
  if (!params.transpose())
    weights = createOp<ops::TransposeOp>(weights, std::vector<std::size_t>{1, 0})->getOutput(0);

  auto result = createFullyConnected(inputs[0], weights, params.axis());

  // Add the bias, if any.
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertConcat(const caffe::LayerParameter &layer,
                              const std::vector<mir::Operation::Output *> &inputs)
{
  const auto &params = layer.concat_param();
  auto concat = createOp<ops::ConcatOp>(inputs, params.axis());
  return {concat->getOutput(0)};
}
template <class PoolingAttributes>
static void convertPoolingParam(const caffe::PoolingParameter &params,
                                const mir::Shape &input_shape, PoolingAttributes &attributes)
{
  std::int32_t kernel_h, kernel_w;
  assert(!params.global_pooling());
  if (params.has_kernel_size())
  {
    kernel_h = kernel_w = params.kernel_size();
  }
  else
  {
    kernel_h = params.kernel_h();
    kernel_w = params.kernel_w();
  }
  attributes.window = {kernel_h, kernel_w};

  std::int32_t stride_h, stride_w;
  if (params.has_stride_h() || params.has_stride_w())
  {
    stride_h = params.stride_h();
    stride_w = params.stride_w();
  }
  else
  {
    stride_h = stride_w = params.stride();
  }
  attributes.strides = {stride_h, stride_w};

  std::int32_t pad_h, pad_w;
  if (params.has_pad_h() || params.has_pad_w())
  {
    pad_h = params.pad_h();
    pad_w = params.pad_w();
  }
  else
  {
    pad_h = pad_w = params.pad();
  }
  attributes.padding_before = attributes.padding_after = {pad_h, pad_w};

  // Caffe rounds the pooling output size up, while MIR rounds it down. Increase the trailing
  // padding when the window does not divide evenly so that the output shape stays the same.
  constexpr int num_spatial_dims = 2;
  for (int i = 0; i < num_spatial_dims; ++i)
  {
    // Assuming NCHW format.
    const std::int32_t padded_input =
        input_shape.dim(2 + i) + attributes.padding_before[i] + attributes.padding_after[i];
    if ((padded_input - attributes.window[i]) % attributes.strides[i] != 0)
      ++attributes.padding_after[i];
  }
}
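// convertPoolingParam worked example (hypothetical values): input extent 6, window 3, stride 2,
// pad 0. Caffe computes the output extent as ceil((6 - 3) / 2) + 1 = 3, while a floor-based
// formula gives floor((6 - 3) / 2) + 1 = 2; bumping padding_after to 1 makes
// floor((6 + 1 - 3) / 2) + 1 = 3, matching Caffe.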
void CaffeOpCreator::checkPooling(const caffe::LayerParameter &layer,
                                  std::set<std::string> &problems_ops_set)
{
  const caffe::PoolingParameter &params = layer.pooling_param();

  if (params.has_global_pooling() && params.global_pooling())
    problems_ops_set.insert("Pooling: pooling layer global_pooling param is not supported yet");

  if (params.pool() != caffe::PoolingParameter::AVE &&
      params.pool() != caffe::PoolingParameter::MAX)
    problems_ops_set.insert("Pooling: unsupported pooling type");

  if (params.has_pad() && (params.has_pad_h() || params.has_pad_w()))
    problems_ops_set.insert("Pooling: conflicting padding properties in pooling");
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertPooling(const caffe::LayerParameter &layer,
                               const std::vector<mir::Operation::Output *> &inputs)
{
  const auto &params = layer.pooling_param();

  assert(inputs.size() == 1);
  auto input = inputs[0];

  mir::Operation::Output *result;

  switch (params.pool())
  {
    case caffe::PoolingParameter::AVE:
    {
      AvgPool2DOpAttributes attributes_avg;
      convertPoolingParam(params, input->getShape(), attributes_avg);
      result = createOp<ops::AvgPool2DOp>(input, attributes_avg)->getOutput(0);
      break;
    }
    case caffe::PoolingParameter::MAX:
    {
      MaxPool2DOpAttributes attributes_max;
      convertPoolingParam(params, input->getShape(), attributes_max);
      result = createOp<ops::MaxPool2DOp>(input, attributes_max)->getOutput(0);
      break;
    }
    default:
      throw std::runtime_error("Unsupported PoolMethod: " + std::to_string(params.pool()));
  }

  return {result};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertSoftmax(const caffe::LayerParameter &layer,
                               const std::vector<mir::Operation::Output *> &inputs)
{
  const auto &params = layer.softmax_param();

  // The backends can only apply softmax along the last axis, so for 4-D NCHW inputs
  // transpose to NHWC, apply softmax along the new last axis, and transpose back.
  if (inputs[0]->getShape().rank() == 4)
  {
    // For now, only axis 1 (channels) is supported.
    if (params.axis() != 1)
      throw std::runtime_error("Softmax: unsupported axis");
    int32_t axis = 3;
    auto input = createOp<ops::TransposeOp>(inputs[0], std::vector<std::size_t>{0, 2, 3, 1});
    auto softmax = createOp<ops::SoftmaxOp>(input->getOutput(0), axis);
    auto result =
        createOp<ops::TransposeOp>(softmax->getOutput(0), std::vector<std::size_t>{0, 3, 1, 2});
    return {result->getOutput(0)};
  }

  auto softmax = createOp<ops::SoftmaxOp>(inputs[0], params.axis());
  return {softmax->getOutput(0)};
}
void CaffeOpCreator::checkReshape(const caffe::LayerParameter &layer,
                                  std::set<std::string> &problems_ops_set)
{
  const caffe::ReshapeParameter &params = layer.reshape_param();

  if (params.has_axis() || params.has_num_axes())
    problems_ops_set.insert("Reshape layer axis and num_axes params are not supported yet");

  if (!params.has_shape())
    problems_ops_set.insert("Reshape layer doesn't have shape parameter");

  const mir::Shape newShape = convertBlobShape(params.shape());

  for (int32_t i = 0; i < newShape.rank(); ++i)
    if (newShape.dim(i) == 0)
      problems_ops_set.insert("Reshape layer zero shape values are not supported yet");
}
// Converts Caffe Reshape layer to Model IR Reshape operation.
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertReshape(const caffe::LayerParameter &layer,
                               const std::vector<mir::Operation::Output *> &inputs)
{
  const caffe::ReshapeParameter &params = layer.reshape_param();

  const mir::Shape new_shape = convertBlobShape(params.shape());
  auto reshape = createOp<ops::ReshapeOp>(inputs[0], new_shape);
  return {reshape->getOutput(0)};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertReLU(const caffe::LayerParameter &layer,
                            const std::vector<mir::Operation::Output *> &inputs)
{
  mir::Operation *relu;
  if (layer.relu_param().has_negative_slope())
  {
    // A non-zero negative slope turns ReLU into leaky ReLU.
    float alpha = layer.relu_param().negative_slope();
    relu = createOp<ops::LeakyReluOp>(inputs[0], alpha);
  }
  else
  {
    relu = createOp<ops::ReluOp>(inputs[0]);
  }

  return {relu->getOutput(0)};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertScale(const caffe::LayerParameter &layer,
                             const std::vector<mir::Operation::Output *> &inputs)
{
  const auto &params = layer.scale_param();
  auto scale = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
  scale = createOp<ops::ReshapeOp>(scale, Shape{1, scale->getShape().dim(0), 1, 1})->getOutput(0);
  auto result = createOp<ops::MulOp>(inputs[0], scale)->getOutput(0);

  // Add the bias, if any.
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}
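// Note on convertScale above: reshaping the per-channel scale and bias vectors to {1, C, 1, 1}
// lets the element-wise MulOp/AddOp broadcast them across the N, H and W dimensions of the
// NCHW input.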
void CaffeOpCreator::checkBatchNorm(const caffe::LayerParameter &layer,
                                    std::set<std::string> &problems_ops_set)
{
  const auto &scale_shape = layer.blobs(2).shape();

  // Check that the blob with the scale factor contains exactly one value.
  if (scale_shape.dim_size() != 1 || scale_shape.dim(0) != 1)
    problems_ops_set.insert("Unexpected shape of scale parameter in batch norm");
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter &layer,
                                 const std::vector<mir::Operation::Output *> &inputs)
{
  const caffe::BatchNormParameter &params = layer.batch_norm_param();

  auto input = inputs[0];
  auto mean_tensor = convertBlob(layer.blobs(0));
  auto var_tensor = convertBlob(layer.blobs(1));
  auto scale_tensor = convertBlob(layer.blobs(2));
  const float eps = params.eps();

  float scale_factor = *reinterpret_cast<float *>(scale_tensor.at(mir::Index{0}));

  // Y = (X - mean / scale_factor) / sqrt(var / scale_factor + eps) = (X + C1) * C2
  if (scale_factor != 0.0f)
    scale_factor = 1.0f / scale_factor;

  // C1 = -mean / scale_factor
  Tensor<float> mean_accessor(mean_tensor);
  for (const auto &idx : ShapeRange(mean_accessor.getShape()))
    mean_accessor.at(idx) *= -scale_factor;
  auto c1 = createOp<ops::ConstantOp>(mean_tensor)->getOutput(0);

  // C2 = 1 / sqrt(var / scale_factor + eps)
  Tensor<float> var_accessor(var_tensor);
  for (const auto &idx : ShapeRange(var_accessor.getShape()))
    var_accessor.at(idx) = 1.0f / std::sqrt(var_accessor.at(idx) * scale_factor + eps);
  auto c2 = createOp<ops::ConstantOp>(var_tensor)->getOutput(0);

  c1 = createOp<ops::ReshapeOp>(c1, Shape{1, c1->getShape().dim(0), 1, 1})->getOutput(0);
  c2 = createOp<ops::ReshapeOp>(c2, Shape{1, c2->getShape().dim(0), 1, 1})->getOutput(0);

  // Y = (X + C1) * C2
  auto result = createOp<ops::AddOp>(input, c1)->getOutput(0);
  result = createOp<ops::MulOp>(result, c2)->getOutput(0);

  return {result};
}
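// Derivation for convertBatchNorm: Caffe's BatchNorm stores running sums in blobs(0) and
// blobs(1), normalized by the scale factor s in blobs(2), so inference computes
//   Y = (X - mean / s) / sqrt(var / s + eps).
// Folding the constants gives Y = (X + C1) * C2 with C1 = -mean / s and
// C2 = 1 / sqrt(var / s + eps), which is exactly the AddOp followed by MulOp built above.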
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertDropout(const caffe::LayerParameter &layer,
                               const std::vector<mir::Operation::Output *> &inputs)
{
  // Dropout is a no-op at inference time, so pass the input through unchanged.
  return {inputs[0]};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertELU(const caffe::LayerParameter &layer,
                           const std::vector<mir::Operation::Output *> &inputs)
{
  const caffe::ELUParameter &params = layer.elu_param();

  auto elu = createOp<ops::EluOp>(inputs[0], params.alpha());
  return {elu->getOutput(0)};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertEmbed(const caffe::LayerParameter &layer,
                             const std::vector<mir::Operation::Output *> &inputs)
{
  const auto &params = layer.embed_param();
  auto data = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)));
  auto result = createOp<ops::GatherOp>(data->getOutput(0), inputs[0], 0)->getOutput(0);

  // Add the bias, if any.
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}
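// Note on convertEmbed above: the Embed layer is an embedding lookup, so it maps onto GatherOp
// with axis 0, which selects rows of the weight matrix in blobs(0) indexed by the input tensor.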
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertSigmoid(const caffe::LayerParameter &layer,
                               const std::vector<mir::Operation::Output *> &inputs)
{
  auto result = createOp<ops::SigmoidOp>(inputs[0]);
  return {result->getOutput(0)};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertTanH(const caffe::LayerParameter &layer,
                            const std::vector<mir::Operation::Output *> &inputs)
{
  auto tanh = createOp<ops::TanhOp>(inputs[0]);
  return {tanh->getOutput(0)};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertEltwise(const caffe::LayerParameter &layer,
                               const std::vector<mir::Operation::Output *> &inputs)
{
  auto &params = layer.eltwise_param();

  mir::Operation::Output *result;
  switch (params.operation())
  {
    case caffe::EltwiseParameter::PROD:
    {
      result = createOp<ops::MulOp>(inputs[0], inputs[1])->getOutput(0);
      for (int i = 2; i < layer.bottom_size(); ++i)
        result = createOp<ops::MulOp>(result, inputs[i])->getOutput(0);
      break;
    }
    case caffe::EltwiseParameter::SUM:
    {
      std::vector<mir::Operation::Output *> scaled_inputs = inputs;
      if (params.coeff_size() > 0)
      {
        assert(params.coeff_size() == layer.bottom_size());
        for (int i = 0; i < layer.bottom_size(); i++)
        {
          if (params.coeff(i) != 1.0f)
          {
            const float coeff_val = params.coeff(i);
            TensorVariant coeff_tensor({DataType::FLOAT32, {}}, &coeff_val);
            auto coeff_const = createOp<ops::ConstantOp>(coeff_tensor)->getOutput(0);
            scaled_inputs[i] = createOp<ops::MulOp>(coeff_const, inputs[i])->getOutput(0);
          }
        }
      }
      result = createOp<ops::AddOp>(scaled_inputs[0], scaled_inputs[1])->getOutput(0);
      for (int i = 2; i < layer.bottom_size(); ++i)
        result = createOp<ops::AddOp>(result, scaled_inputs[i])->getOutput(0);
      break;
    }
    case caffe::EltwiseParameter::MAX:
    {
      result = createOp<ops::MaxOp>(inputs[0], inputs[1])->getOutput(0);
      for (int i = 2; i < layer.bottom_size(); ++i)
        result = createOp<ops::MaxOp>(result, inputs[i])->getOutput(0);
      break;
    }
    default:
      throw std::runtime_error("Unknown element-wise operation.");
  }

  return {result};
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertSplit(const caffe::LayerParameter &layer,
                             const std::vector<mir::Operation::Output *> &inputs)
{
  // Caffe's Split layer simply replicates its single input to every output.
  std::vector<mir::Operation::Output *> outputs(layer.top_size(), inputs.at(0));
  return outputs;
}
void CaffeOpCreator::checkLSTM(const caffe::LayerParameter &layer,
                               std::set<std::string> &problems_ops_set)
{
  const auto &params = layer.recurrent_param();
  if (params.expose_hidden())
    problems_ops_set.insert("LSTM: parameter 'expose_hidden' has unsupported value: " +
                            std::to_string(params.expose_hidden()));
}
static TensorVariant createZeroedTensor(const mir::Shape &shape)
{
  // For now the element type is hardcoded to float32.
  auto elem_type = mir::DataType::FLOAT32;
  std::vector<float> zeros(static_cast<std::size_t>(shape.numElements()), 0.0f);
  return TensorVariant({elem_type, shape}, zeros.data());
}
std::vector<mir::Operation::Output *>
CaffeOpCreator::convertLSTM(const caffe::LayerParameter &layer,
                            const std::vector<mir::Operation::Output *> &inputs)
{
  const auto &params = layer.recurrent_param();

  // Inputs to the layer.
  auto x = inputs[0];
  auto cont = inputs[1];
  assert(inputs.size() == 2);

  const auto &x_shape = x->getShape();
  const int32_t seq_length = x_shape.dim(0);
  const int32_t batch_size = x_shape.dim(1);
  const int32_t hidden_size = params.num_output();

  // Learned parameters of the layer, transposed to match MIR.
  auto xw = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
  auto xb = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
  auto hw = createOp<ops::ConstantOp>(convertBlob(layer.blobs(2)))->getOutput(0);
  xw = createOp<ops::TransposeOp>(xw, std::vector<std::size_t>{1, 0})->getOutput(0);
  hw = createOp<ops::TransposeOp>(hw, std::vector<std::size_t>{1, 0})->getOutput(0);

  // Add a trailing dimension so that element-wise operations broadcast correctly.
  cont = createOp<ops::ReshapeOp>(cont, Shape{seq_length, batch_size, 1})->getOutput(0);

  // Initial cell and hidden states are zero.
  auto zero_tensor = createZeroedTensor(Shape{1, batch_size, hidden_size});
  auto c_t = createOp<ops::ConstantOp>(zero_tensor)->getOutput(0);
  auto h_t = createOp<ops::ConstantOp>(zero_tensor)->getOutput(0);

  // The input contribution to the gates is the same for all time steps; compute it once.
  auto x_xw = createFullyConnected(x, xw, 2);
  auto x_xw_b = createOp<ops::AddOp>(x_xw, xb)->getOutput(0);

  // Split the input and continuation tensors into per-time-step slices.
  std::vector<mir::Operation::Output *> x_xw_b_slices = createSplit(x_xw_b, seq_length, 0);
  std::vector<mir::Operation::Output *> cont_slices = createSplit(cont, seq_length, 0);
  std::vector<mir::Operation::Output *> h_slices(seq_length);

  for (int32_t t = 0; t < seq_length; t++)
  {
    // 'cont' zeroes the states at sequence boundaries.
    auto c_cont_t = createOp<ops::MulOp>(c_t, cont_slices[t])->getOutput(0);
    auto h_cont_t = createOp<ops::MulOp>(h_t, cont_slices[t])->getOutput(0);

    auto x_xw_b_t = x_xw_b_slices[t];
    auto h_hw_t = createFullyConnected(h_cont_t, hw, 2);
    auto activation_inputs_concat = createOp<ops::AddOp>(x_xw_b_t, h_hw_t)->getOutput(0);
    auto activation_inputs = createSplit(activation_inputs_concat, 4, 2);

    auto i_t = createOp<ops::SigmoidOp>(activation_inputs[0])->getOutput(0);
    auto f_t = createOp<ops::SigmoidOp>(activation_inputs[1])->getOutput(0);
    auto o_t = createOp<ops::SigmoidOp>(activation_inputs[2])->getOutput(0);
    auto g_t = createOp<ops::TanhOp>(activation_inputs[3])->getOutput(0);

    c_t = createOp<ops::AddOp>(createOp<ops::MulOp>(c_cont_t, f_t)->getOutput(0),
                               createOp<ops::MulOp>(i_t, g_t)->getOutput(0))
              ->getOutput(0);
    h_t = createOp<ops::MulOp>(createOp<ops::TanhOp>(c_t)->getOutput(0), o_t)->getOutput(0);

    h_slices[t] = h_t;
  }

  return {createOp<ops::ConcatOp>(h_slices, 0)->getOutput(0)};
}
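// Gate equations realized by the loop above, in standard LSTM notation:
//   i_t = sigmoid(W_i * [x_t, h_{t-1}] + b_i)   (input gate)
//   f_t = sigmoid(W_f * [x_t, h_{t-1}] + b_f)   (forget gate)
//   o_t = sigmoid(W_o * [x_t, h_{t-1}] + b_o)   (output gate)
//   g_t = tanh(W_g * [x_t, h_{t-1}] + b_g)      (candidate cell state)
//   c_t = f_t * c_{t-1} + i_t * g_t
//   h_t = o_t * tanh(c_t)
// The four gate pre-activations are computed jointly and separated by createSplit(..., 4, 2).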