ONE - On-device Neural Engine
mir_caffe::CaffeOpCreator Class Reference

#include <caffe_op_creator.h>

Public Member Functions

 CaffeOpCreator (mir::Graph *g)
 
std::vector< mir::Operation::Output * > convertInput (const caffe::LayerParameter &layer)
 
std::vector< mir::Operation::Output * > convertConvolution (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertInnerProduct (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertConcat (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertPooling (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertSoftmax (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertReshape (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 Converts Caffe Reshape layer to Model IR Reshape operation.
 
std::vector< mir::Operation::Output * > convertReLU (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertScale (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertBatchNorm (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertDropout (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertDeconvolution (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertELU (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertEmbed (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertSigmoid (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertTanH (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertEltwise (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertSplit (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
std::vector< mir::Operation::Output * > convertLSTM (const caffe::LayerParameter &layer, const std::vector< mir::Operation::Output * > &inputs)
 
void checkConvolution (const caffe::LayerParameter &layer, std::set< std::string > &problems_ops_set)
 
void checkPooling (const caffe::LayerParameter &layer, std::set< std::string > &problems_ops_set)
 
void checkReshape (const caffe::LayerParameter &layer, std::set< std::string > &problems_ops_set)
 
void checkBatchNorm (const caffe::LayerParameter &layer, std::set< std::string > &problems_ops_set)
 
void checkLSTM (const caffe::LayerParameter &layer, std::set< std::string > &problems_ops_set)
 

Detailed Description

Definition at line 34 of file caffe_op_creator.h.

Constructor & Destructor Documentation

◆ CaffeOpCreator()

mir_caffe::CaffeOpCreator::CaffeOpCreator ( mir::Graph *  g)
inline explicit

Definition at line 37 of file caffe_op_creator.h.

explicit CaffeOpCreator(mir::Graph *g) : _graph(g) {}
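A minimal usage sketch of the class: construct a CaffeOpCreator over a mir::Graph and convert an Input layer description. The include paths, the default-constructibility of mir::Graph, and the Caffe protobuf setter calls are assumptions about the surrounding project, not taken from this page.

#include <mir/Graph.h>                 // assumed header location
#include <caffe/proto/caffe.pb.h>      // assumed generated proto header
#include "caffe_op_creator.h"

int main()
{
  mir::Graph graph;
  mir_caffe::CaffeOpCreator op_creator(&graph); // constructor documented above

  // Describe an Input layer with a single 1x3x224x224 top blob.
  caffe::LayerParameter layer;
  layer.set_type("Input");
  layer.add_top("data");
  auto *shape = layer.mutable_input_param()->add_shape();
  for (int d : {1, 3, 224, 224})
    shape->add_dim(d);

  // Creates one InputOp in the graph and returns its output.
  auto outputs = op_creator.convertInput(layer);
  return outputs.size() == 1 ? 0 : 1;
}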

Member Function Documentation

◆ checkBatchNorm()

void mir_caffe::CaffeOpCreator::checkBatchNorm ( const caffe::LayerParameter &  layer,
std::set< std::string > &  problems_ops_set 
)

Definition at line 533 of file caffe_op_creator.cpp.

{
  const auto &scale_shape = layer.blobs(2).shape();

  // Check that the last blob (with scaleFactor) contains only one number
  if (scale_shape.dim_size() != 1 || scale_shape.dim(0) != 1)
    problems_ops_set.insert("Unexpected shape of scale parameter in batch norm");
}

◆ checkConvolution()

void mir_caffe::CaffeOpCreator::checkConvolution ( const caffe::LayerParameter &  layer,
std::set< std::string > &  problems_ops_set 
)

Definition at line 221 of file caffe_op_creator.cpp.

{
  const caffe::ConvolutionParameter &params = layer.convolution_param();

  assert(params.stride_size() <= 2);

  if (params.axis() != 1)
    problems_ops_set.insert("Conv2D: Unsupported axis");

  if (params.pad_size() != 0 && (params.has_pad_h() || params.has_pad_w()))
    problems_ops_set.insert("Conv2D: Conflicting padding properties");

  if (params.pad_size() > 2)
    problems_ops_set.insert("Conv2D: Unsupported number of pads");
}

◆ checkLSTM()

void mir_caffe::CaffeOpCreator::checkLSTM ( const caffe::LayerParameter &  layer,
std::set< std::string > &  problems_ops_set 
)

Definition at line 702 of file caffe_op_creator.cpp.

{
  const auto &params = layer.recurrent_param();
  if (params.expose_hidden())
    problems_ops_set.insert("LSTM: parameter 'expose_hidden' has unsupported value: " +
                            std::to_string(params.expose_hidden()));
}

◆ checkPooling()

void mir_caffe::CaffeOpCreator::checkPooling ( const caffe::LayerParameter &  layer,
std::set< std::string > &  problems_ops_set 
)

Definition at line 383 of file caffe_op_creator.cpp.

{
  const caffe::PoolingParameter &params = layer.pooling_param();

  if (params.has_global_pooling() && params.global_pooling())
    problems_ops_set.insert("Pooling: pooling layer global_pooling param is not supported yet");

  if (params.pool() != caffe::PoolingParameter::AVE &&
      params.pool() != caffe::PoolingParameter::MAX)
    problems_ops_set.insert("Pooling: unsupported pooling type");

  if (params.has_pad() && (params.has_pad_h() || params.has_pad_w()))
    problems_ops_set.insert("Pooling: conflicting padding properties in pooling");
}

◆ checkReshape()

void mir_caffe::CaffeOpCreator::checkReshape ( const caffe::LayerParameter &  layer,
std::set< std::string > &  problems_ops_set 
)

Definition at line 460 of file caffe_op_creator.cpp.

{
  const caffe::ReshapeParameter &params = layer.reshape_param();

  if (params.has_axis() || params.has_num_axes())
    problems_ops_set.insert("Reshape layer axis and num_axes params are not supported yet");

  if (!params.has_shape())
    problems_ops_set.insert("Reshape layer doesn't have shape parameter");

  const mir::Shape newShape = convertBlobShape(params.shape());

  for (int32_t i = 0; i < newShape.rank(); ++i)
    if (newShape.dim(i) == 0)
      problems_ops_set.insert("Reshape layer zero shape values are not supported yet");
}

References mir::Shape::dim(), and mir::Shape::rank().

◆ convertBatchNorm()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertBatchNorm ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 544 of file caffe_op_creator.cpp.

{
  const caffe::BatchNormParameter &params = layer.batch_norm_param();

  auto input = inputs[0];
  auto mean_tensor = convertBlob(layer.blobs(0));
  auto var_tensor = convertBlob(layer.blobs(1));
  auto scale_tensor = convertBlob(layer.blobs(2));
  const float eps = params.eps();

  float scale_factor = *reinterpret_cast<float *>(scale_tensor.at(mir::Index{0}));

  // See https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cpp#L100
  // Y = (X - mean / scale_factor) / sqrt(var / scale_factor + epsilon) =
  //   = (X + C1) * C2
  if (scale_factor != 0.0f)
    scale_factor = 1.0f / scale_factor;

  // C1 = -mean / scale_factor
  Tensor<float> mean_accessor(mean_tensor);
  for (const auto &idx : ShapeRange(mean_accessor.getShape()))
    mean_accessor.at(idx) *= -scale_factor;
  auto c1 = createOp<ops::ConstantOp>(mean_tensor)->getOutput(0);

  // C2 = 1 / sqrt(var / scale_factor + epsilon)
  Tensor<float> var_accessor(var_tensor);
  for (const auto &idx : ShapeRange(var_accessor.getShape()))
    var_accessor.at(idx) = 1.0f / std::sqrt(var_accessor.at(idx) * scale_factor + eps);
  auto c2 = createOp<ops::ConstantOp>(var_tensor)->getOutput(0);

  c1 = createOp<ops::ReshapeOp>(c1, Shape{1, c1->getShape().dim(0), 1, 1})->getOutput(0);
  c2 = createOp<ops::ReshapeOp>(c2, Shape{1, c2->getShape().dim(0), 1, 1})->getOutput(0);

  // Y = (X + C1) * C2
  auto result = createOp<ops::AddOp>(input, c1)->getOutput(0);
  result = createOp<ops::MulOp>(result, c2)->getOutput(0);

  return {result};
}

References mir::Tensor< T >::at(), mir::Tensor< T >::getShape(), and mir_caffe::input.
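A small standalone sketch (plain C++, no mir or Caffe dependencies, values made up) of the constant folding performed above: after inverting the scale factor, Y = (X - mean * scale_factor) / sqrt(var * scale_factor + eps) is rewritten as Y = (X + C1) * C2 with C1 = -mean * scale_factor and C2 = 1 / sqrt(var * scale_factor + eps).

#include <cassert>
#include <cmath>
#include <cstdio>

int main()
{
  const float mean = 0.5f, var = 2.0f, eps = 1e-5f;
  float scale_factor = 4.0f;       // raw value from the third blob
  if (scale_factor != 0.0f)
    scale_factor = 1.0f / scale_factor; // same inversion as in convertBatchNorm

  const float c1 = -mean * scale_factor;                         // folded into the mean tensor
  const float c2 = 1.0f / std::sqrt(var * scale_factor + eps);   // folded into the variance tensor

  const float x = 1.25f;
  const float direct = (x - mean * scale_factor) / std::sqrt(var * scale_factor + eps);
  const float folded = (x + c1) * c2;

  assert(std::fabs(direct - folded) < 1e-6f); // both forms agree
  std::printf("direct=%f folded=%f\n", direct, folded);
  return 0;
}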

◆ convertConcat()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertConcat ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 320 of file caffe_op_creator.cpp.

{
  const auto &params = layer.concat_param();
  auto concat = createOp<ops::ConcatOp>(inputs, params.axis());
  return {concat->getOutput(0)};
}

References mir_caffe::concat.

◆ convertConvolution()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertConvolution ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 239 of file caffe_op_creator.cpp.

{
  const auto &params = layer.convolution_param();
  Conv2DOpAttributes attributes;

  convertConvolutionParam(params, attributes);
  attributes.num_groups = params.group();
  attributes.data_format = DataFormat::NCHW;

  assert(layer.blobs(0).shape().dim_size() == 4);
  auto kernel = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
  std::vector<std::size_t> perm{0, 2, 3, 1}; // OIHW -> OHWI
  kernel = createOp<ops::TransposeOp>(kernel, perm)->getOutput(0);
  auto result = createOp<ops::Conv2DOp>(inputs[0], kernel, attributes)->getOutput(0);

  // Add the bias, if any.
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}

References mir_caffe::bias, mir::Conv2DOpAttributes::data_format, and mir::Conv2DOpAttributes::num_groups.
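A standalone sketch of the kernel layout change applied above: Caffe stores convolution weights as OIHW, and the axes are permuted with perm = {0, 2, 3, 1} to obtain OHWI before building the Conv2DOp. The dimensions below are illustrative only.

#include <array>
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
  const std::array<int, 4> src = {2, 3, 5, 5};   // O, I, H, W
  const std::array<int, 4> perm = {0, 2, 3, 1};  // OIHW -> OHWI
  std::array<int, 4> dst;
  for (int a = 0; a < 4; ++a)
    dst[a] = src[perm[a]];                       // dst = {O, H, W, I}

  std::vector<float> in(src[0] * src[1] * src[2] * src[3]);
  for (std::size_t i = 0; i < in.size(); ++i)
    in[i] = static_cast<float>(i);

  // Copy every element from its OIHW position to the corresponding OHWI position.
  std::vector<float> out(in.size());
  for (int o = 0; o < src[0]; ++o)
    for (int i = 0; i < src[1]; ++i)
      for (int h = 0; h < src[2]; ++h)
        for (int w = 0; w < src[3]; ++w)
        {
          const int src_idx = ((o * src[1] + i) * src[2] + h) * src[3] + w;
          const int dst_idx = ((o * dst[1] + h) * dst[2] + w) * dst[3] + i;
          out[dst_idx] = in[src_idx];
        }

  // Spot-check: (o=0, h=0, w=0, i=I-1) in OHWI equals (o=0, i=I-1, h=0, w=0) in OIHW.
  assert(out[dst[3] - 1] == in[(src[1] - 1) * src[2] * src[3]]);
  return 0;
}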

◆ convertDeconvolution()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertDeconvolution ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 267 of file caffe_op_creator.cpp.

{
  const caffe::ConvolutionParameter &params = layer.convolution_param();
  Deconv2DOpAttributes attributes;

  convertConvolutionParam(params, attributes);
  attributes.data_format = DataFormat::NCHW;

  if (params.group() != 1)
  {
    throw std::runtime_error("Deconvolution: 'group' != 1 is not supported.");
  }

  auto kernel = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
  std::vector<std::size_t> perm{2, 3, 1, 0}; // IOHW -> HWOI
  kernel = createOp<ops::TransposeOp>(kernel, perm)->getOutput(0);
  auto result = createOp<ops::DeConv2DOp>(inputs[0], kernel, attributes)->getOutput(0);

  // bias_term is optional (so might not be present) and defaults to true
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}

References mir_caffe::bias, and mir::Deconv2DOpAttributes::data_format.

◆ convertDropout()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertDropout ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 586 of file caffe_op_creator.cpp.

{
  // This is a no-op in inference mode.
  return {inputs[0]};
}

◆ convertEltwise()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertEltwise ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 638 of file caffe_op_creator.cpp.

{
  auto &params = layer.eltwise_param();

  mir::Operation::Output *result;
  switch (params.operation())
  {
    case caffe::EltwiseParameter::PROD:
    {
      result = createOp<ops::MulOp>(inputs[0], inputs[1])->getOutput(0);
      for (int i = 2; i < layer.bottom_size(); ++i)
      {
        result = createOp<ops::MulOp>(result, inputs[i])->getOutput(0);
      }
      break;
    }
    case caffe::EltwiseParameter::SUM:
    {
      std::vector<mir::Operation::Output *> scaled_inputs = inputs;
      if (params.coeff_size() > 0)
      {
        assert(params.coeff_size() == layer.bottom_size());
        for (int i = 0; i < layer.bottom_size(); i++)
        {
          if (params.coeff(i) != 1.0f)
          {
            const float coeff_val = params.coeff(i);
            TensorVariant coeff_tensor({DataType::FLOAT32, {}}, &coeff_val);
            auto coeff_const = createOp<ops::ConstantOp>(coeff_tensor)->getOutput(0);
            scaled_inputs[i] = createOp<ops::MulOp>(coeff_const, inputs[i])->getOutput(0);
          }
        }
      }
      result = createOp<ops::AddOp>(scaled_inputs[0], scaled_inputs[1])->getOutput(0);
      for (int i = 2; i < layer.bottom_size(); ++i)
      {
        result = createOp<ops::AddOp>(result, scaled_inputs[i])->getOutput(0);
      }
      break;
    }
    case caffe::EltwiseParameter::MAX:
    {
      result = createOp<ops::MaxOp>(inputs[0], inputs[1])->getOutput(0);
      for (int i = 2; i < layer.bottom_size(); ++i)
      {
        result = createOp<ops::MaxOp>(result, inputs[i])->getOutput(0);
      }
      break;
    }
    default:
      throw std::runtime_error("Unknown element-wise operation.");
  }
  return {result};
}
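A plain C++ sketch of the SUM branch above: inputs whose coefficient is not 1 are pre-multiplied by it (a ConstantOp plus a MulOp in the graph), and all inputs are then folded left to right with additions (a chain of AddOp nodes). Values are illustrative.

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
  const std::vector<float> inputs = {1.0f, 2.0f, 3.0f};
  const std::vector<float> coeff = {1.0f, 0.5f, -1.0f};
  assert(coeff.size() == inputs.size());

  // Scale the inputs whose coefficient differs from 1.
  std::vector<float> scaled = inputs;
  for (std::size_t i = 0; i < inputs.size(); ++i)
    if (coeff[i] != 1.0f)
      scaled[i] = coeff[i] * inputs[i];

  // Fold with a chain of additions, exactly left to right.
  float result = scaled[0] + scaled[1];
  for (std::size_t i = 2; i < scaled.size(); ++i)
    result += scaled[i];

  assert(result == -1.0f); // 1 + 0.5*2 + (-1)*3
  return 0;
}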

◆ convertELU()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertELU ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 594 of file caffe_op_creator.cpp.

{
  const caffe::ELUParameter &params = layer.elu_param();

  auto elu = createOp<ops::EluOp>(inputs[0], params.alpha());
  return {elu->getOutput(0)};
}

◆ convertEmbed()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertEmbed ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 604 of file caffe_op_creator.cpp.

{
  const auto &params = layer.embed_param();
  auto data = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)));
  auto result = createOp<ops::GatherOp>(data->getOutput(0), inputs[0], 0)->getOutput(0);

  // Add the bias, if any.
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}

References mir_caffe::bias, and mir_caffe::data.

◆ convertInnerProduct()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertInnerProduct ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 298 of file caffe_op_creator.cpp.

{
  const auto &params = layer.inner_product_param();
  auto weights = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);

  if (!params.transpose())
    weights = createOp<ops::TransposeOp>(weights, std::vector<std::size_t>{1, 0})->getOutput(0);

  auto result = createFullyConnected(inputs[0], weights, params.axis());

  // Add the bias, if any.
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}

References mir_caffe::bias.

◆ convertInput()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertInput ( const caffe::LayerParameter &  layer)

Definition at line 144 of file caffe_op_creator.cpp.

{
  const auto &params = layer.input_param();
  const auto num_inputs = layer.top_size();
  const auto num_shapes = params.shape_size();
  std::vector<mir::Operation::Output *> outputs;

  assert((num_shapes == 1 || num_shapes == num_inputs) && "Unsupported number of shapes.");

  for (int i = 0; i < num_inputs; ++i)
  {
    const auto &blob_shape = params.shape(num_shapes == 1 ? 0 : i);
    mir::TensorType input_type(DataType::FLOAT32, convertBlobShape(blob_shape));
    auto input = createOp<ops::InputOp>(input_type)->getOutput(0);
    outputs.push_back(input);
  }

  return outputs;
}

References mir_caffe::input.

◆ convertLSTM()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertLSTM ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 771 of file caffe_op_creator.cpp.

{
  const auto &params = layer.recurrent_param();

  // Inputs to the layer.
  auto x = inputs[0];
  auto cont = inputs[1];
  assert(inputs.size() == 2);

  const auto &x_shape = x->getShape();
  const int32_t seq_length = x_shape.dim(0);
  const int32_t batch_size = x_shape.dim(1);
  const int32_t hidden_size = params.num_output();

  // Learned parameters of the layer. Tensors are transposed to match the ModelIR.
  auto xw = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
  auto xb = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
  auto hw = createOp<ops::ConstantOp>(convertBlob(layer.blobs(2)))->getOutput(0);
  xw = createOp<ops::TransposeOp>(xw, std::vector<std::size_t>{1, 0})->getOutput(0);
  hw = createOp<ops::TransposeOp>(hw, std::vector<std::size_t>{1, 0})->getOutput(0);

  // Add a dummy dimension so that element-wise operations perform properly.
  cont = createOp<ops::ReshapeOp>(cont, Shape{seq_length, batch_size, 1})->getOutput(0);

  // Initialize cell and hidden states with zeros.
  auto zero_tensor = createZeroedTensor(Shape{1, batch_size, hidden_size});
  auto c_t = createOp<ops::ConstantOp>(zero_tensor)->getOutput(0);
  auto h_t = createOp<ops::ConstantOp>(zero_tensor)->getOutput(0);

  auto x_xw = createFullyConnected(x, xw, 2);
  auto x_xw_b = createOp<ops::AddOp>(x_xw, xb)->getOutput(0);

  // Split input and continuation tensors into seq_length slices.
  std::vector<mir::Operation::Output *> x_xw_b_slices = createSplit(x_xw_b, seq_length, 0);
  std::vector<mir::Operation::Output *> cont_slices = createSplit(cont, seq_length, 0);
  std::vector<mir::Operation::Output *> h_slices(seq_length);

  for (int32_t t = 0; t < seq_length; t++)
  {
    auto c_cont_t = createOp<ops::MulOp>(c_t, cont_slices[t])->getOutput(0);
    auto h_cont_t = createOp<ops::MulOp>(h_t, cont_slices[t])->getOutput(0);

    auto x_xw_b_t = x_xw_b_slices[t];
    auto h_hw_t = createFullyConnected(h_cont_t, hw, 2);
    auto activation_inputs_concat = createOp<ops::AddOp>(x_xw_b_t, h_hw_t)->getOutput(0);
    auto activation_inputs = createSplit(activation_inputs_concat, 4, 2);

    auto i_t = createOp<ops::SigmoidOp>(activation_inputs[0])->getOutput(0);
    auto f_t = createOp<ops::SigmoidOp>(activation_inputs[1])->getOutput(0);
    auto o_t = createOp<ops::SigmoidOp>(activation_inputs[2])->getOutput(0);
    auto g_t = createOp<ops::TanhOp>(activation_inputs[3])->getOutput(0);

    c_t = createOp<ops::AddOp>(createOp<ops::MulOp>(c_cont_t, f_t)->getOutput(0),
                               createOp<ops::MulOp>(i_t, g_t)->getOutput(0))
            ->getOutput(0);
    h_t = createOp<ops::MulOp>(createOp<ops::TanhOp>(c_t)->getOutput(0), o_t)->getOutput(0);

    h_slices[t] = h_t;
  }

  return {createOp<ops::ConcatOp>(h_slices, 0)->getOutput(0)};
}
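A scalar sketch of one time step of the unrolled LSTM built above: the pre-activations are split into the i, f, o, g gates, then the cell and hidden states are updated; the "cont" factor zeroes the previous state at sequence boundaries, as in convertLSTM. Numbers are made up for illustration.

#include <cmath>
#include <cstdio>

static float sigmoid(float x) { return 1.0f / (1.0f + std::exp(-x)); }

int main()
{
  float c_prev = 0.3f;       // previous cell state
  const float cont = 1.0f;   // 0 would reset the state at the start of a new sequence

  // Pre-activations (x*W_x + b plus the recurrent contribution), one scalar per gate here.
  const float a_i = 0.1f, a_f = 0.4f, a_o = -0.2f, a_g = 0.7f;

  const float c_cont = c_prev * cont;
  const float i_t = sigmoid(a_i);     // input gate
  const float f_t = sigmoid(a_f);     // forget gate
  const float o_t = sigmoid(a_o);     // output gate
  const float g_t = std::tanh(a_g);   // candidate cell value

  const float c_t = c_cont * f_t + i_t * g_t;  // new cell state
  const float h_t = std::tanh(c_t) * o_t;      // new hidden state (the layer's output slice)

  std::printf("c_t=%f h_t=%f\n", c_t, h_t);
  return 0;
}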

◆ convertPooling()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertPooling ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 400 of file caffe_op_creator.cpp.

{
  const auto &params = layer.pooling_param();

  assert(inputs.size() == 1);
  auto input = inputs[0];

  mir::Operation::Output *result;

  switch (params.pool())
  {
    case caffe::PoolingParameter::AVE:
    {
      AvgPool2DOpAttributes attributes_avg;
      attributes_avg.data_format = DataFormat::NCHW;
      convertPoolingParam(params, input->getShape(), attributes_avg);
      result = createOp<ops::AvgPool2DOp>(input, attributes_avg)->getOutput(0);
      break;
    }
    case caffe::PoolingParameter::MAX:
    {
      MaxPool2DOpAttributes attributes_max;
      attributes_max.data_format = DataFormat::NCHW;
      convertPoolingParam(params, input->getShape(), attributes_max);
      result = createOp<ops::MaxPool2DOp>(input, attributes_max)->getOutput(0);
      break;
    }
    default:
      throw std::runtime_error("Unsupported PoolMethod: " + std::to_string(params.pool()));
  }

  return {result};
}

References mir::AvgPool2DOpAttributes::data_format, mir::MaxPool2DOpAttributes::data_format, and mir_caffe::input.

◆ convertReLU()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertReLU ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 496 of file caffe_op_creator.cpp.

{
  mir::Operation *relu;
  if (layer.relu_param().has_negative_slope())
  {
    float alpha = layer.relu_param().negative_slope();
    relu = createOp<ops::LeakyReluOp>(inputs[0], alpha);
  }
  else
  {
    relu = createOp<ops::ReluOp>(inputs[0]);
  }

  return {relu->getOutput(0)};
}

◆ convertReshape()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertReshape ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Converts Caffe Reshape layer to Model IR Reshape operation.

Definition at line 485 of file caffe_op_creator.cpp.

{
  const caffe::ReshapeParameter &params = layer.reshape_param();

  const mir::Shape new_shape = convertBlobShape(params.shape());
  auto reshape = createOp<ops::ReshapeOp>(inputs[0], new_shape);
  return {reshape->getOutput(0)};
}

References mir_caffe::reshape.

◆ convertScale()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertScale ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 514 of file caffe_op_creator.cpp.

{
  const auto &params = layer.scale_param();
  auto scale = createOp<ops::ConstantOp>(convertBlob(layer.blobs(0)))->getOutput(0);
  scale = createOp<ops::ReshapeOp>(scale, Shape{1, scale->getShape().dim(0), 1, 1})->getOutput(0);
  auto result = createOp<ops::MulOp>(inputs[0], scale)->getOutput(0);

  // Add the bias, if any.
  if (params.bias_term())
  {
    auto bias = createOp<ops::ConstantOp>(convertBlob(layer.blobs(1)))->getOutput(0);
    bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
    result = createOp<ops::AddOp>(result, bias)->getOutput(0);
  }

  return {result};
}

References mir_caffe::bias, and mir_caffe::scale.

◆ convertSigmoid()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertSigmoid ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 622 of file caffe_op_creator.cpp.

{
  auto result = createOp<ops::SigmoidOp>(inputs[0]);
  return {result->getOutput(0)};
}

◆ convertSoftmax()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertSoftmax ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 436 of file caffe_op_creator.cpp.

{
  const auto &params = layer.softmax_param();

  // CPP and ACL backends are able to perform Softmax only along the last axis.
  // FIXME Do it in backends.
  if (inputs[0]->getShape().rank() == 4)
  {
    // For now, we only account for the most common case.
    if (params.axis() != 1)
      throw std::runtime_error("Softmax: unsupported axis");
    int32_t axis = 3;
    auto input = createOp<ops::TransposeOp>(inputs[0], std::vector<std::size_t>{0, 2, 3, 1});
    auto softmax = createOp<ops::SoftmaxOp>(input->getOutput(0), axis);
    auto result =
      createOp<ops::TransposeOp>(softmax->getOutput(0), std::vector<std::size_t>{0, 3, 1, 2});
    return {result->getOutput(0)};
  }

  auto softmax = createOp<ops::SoftmaxOp>(inputs[0], params.axis());
  return {softmax->getOutput(0)};
}

References mir_caffe::input, and mir_caffe::softmax.
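A small standalone check of the trick used above for 4-D inputs: transpose NCHW to NHWC with {0, 2, 3, 1}, apply softmax along the last axis, then transpose back with {0, 3, 1, 2}. The two permutations compose to the identity, so only the softmax axis is affected. Plain C++, no project dependencies.

#include <array>
#include <cassert>

int main()
{
  const std::array<int, 4> to_nhwc = {0, 2, 3, 1}; // first transpose
  const std::array<int, 4> to_nchw = {0, 3, 1, 2}; // second transpose

  // With a transpose whose output axis a reads input axis perm[a], applying
  // to_nhwc and then to_nchw reads original axis to_nhwc[to_nchw[a]].
  std::array<int, 4> composed;
  for (int a = 0; a < 4; ++a)
    composed[a] = to_nhwc[to_nchw[a]];

  for (int a = 0; a < 4; ++a)
    assert(composed[a] == a); // the round trip restores the original layout
  return 0;
}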

◆ convertSplit()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertSplit ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 695 of file caffe_op_creator.cpp.

{
  std::vector<mir::Operation::Output *> outputs(layer.top_size(), inputs.at(0));
  return outputs;
}

◆ convertTanH()

std::vector< mir::Operation::Output * > mir_caffe::CaffeOpCreator::convertTanH ( const caffe::LayerParameter &  layer,
const std::vector< mir::Operation::Output * > &  inputs 
)

Definition at line 630 of file caffe_op_creator.cpp.

{
  auto tanh = createOp<ops::TanhOp>(inputs[0]);
  return {tanh->getOutput(0)};
}

References mir_caffe::tanh.


The documentation for this class was generated from the following files:

caffe_op_creator.h
caffe_op_creator.cpp