ONE - On-device Neural Engine
Loading...
Searching...
No Matches
caffe2_op_creator.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "caffe2_op_creator.h"
18#include "caffe2_proto_helper.h"
19
#include "mir/ops/AddOp.h"
#include "mir/ops/AvgPool2DOp.h"
#include "mir/ops/CappedReluOp.h"
#include "mir/ops/ConcatOp.h"
#include "mir/ops/ConstantOp.h"
#include "mir/ops/Conv2DOp.h"
#include "mir/ops/FullyConnectedOp.h"
#include "mir/ops/MaxPool2DOp.h"
#include "mir/ops/MulOp.h"
#include "mir/ops/ReluOp.h"
#include "mir/ops/ReshapeOp.h"
#include "mir/ops/ResizeOp.h"
#include "mir/ops/SigmoidOp.h"
#include "mir/ops/SoftmaxOp.h"
#include "mir/ops/TransposeOp.h"
35
36#include "mir/Index.h"
37#include "mir/Shape.h"
38#include "mir/ShapeRange.h"
39#include "mir/Tensor.h"
40#include "mir/TensorUtil.h"
41
42#include <cmath>
43#include <stdexcept>
44#include <vector>
45
46namespace mir_caffe2
47{
48
49using namespace ::caffe2;
50using namespace mir;
51
52//
53// Helper functions
54//
55
56static std::pair<std::vector<int32_t>, std::vector<int32_t>>
57getPadding(const ::caffe2::OperatorDef &op)
58{
59
60 if (hasArgument(op.arg(), "pads"))
61 {
62 // pads order: t l b r
63 auto pads_arg = findArgumentByName(op.arg(), "pads");
64
65 std::vector<int32_t> paddings;
66 for (const auto &pad : pads_arg.ints())
67 paddings.push_back(static_cast<int32_t>(pad));
68
69 assert(paddings.size() == 4);
70
71 int32_t pad_t = paddings[0];
72 int32_t pad_l = paddings[1];
73 int32_t pad_b = paddings[2];
74 int32_t pad_r = paddings[3];
75
76 std::vector<int32_t> padding_before{pad_t, pad_l};
77 std::vector<int32_t> padding_after{pad_b, pad_r};
78 return {padding_before, padding_after};
79 }
80
81 bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r") ||
82 hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
83
84 if (has_custom_pad)
85 {
86 int32_t pad_l = getSingleArgument(op, "pad_l", 0);
87 int32_t pad_t = getSingleArgument(op, "pad_t", 0);
88 int32_t pad_r = getSingleArgument(op, "pad_r", 0);
89 int32_t pad_b = getSingleArgument(op, "pad_b", 0);
90
91 std::vector<int32_t> padding_before{pad_t, pad_l};
92 std::vector<int32_t> padding_after{pad_b, pad_r};
93 return {padding_before, padding_after};
94 }
95
96 int32_t pad = getSingleArgument(op, "pad", 0);
97 return {{pad, pad}, {pad, pad}};
98}
99
100static std::vector<std::int32_t> getStrides(const ::caffe2::OperatorDef &op)
101{
102 std::vector<std::int32_t> strides;
103
104 if (hasArgument(op.arg(), "stride"))
105 {
106 std::int32_t stride = getSingleArgument(op, "stride", 1);
107 strides = {stride, stride};
108 }
109
110 if (hasArgument(op.arg(), "strides"))
111 {
112 // strides order: h w
113 auto strides_arg = findArgumentByName(op.arg(), "strides");
114 for (const auto &s : strides_arg.ints())
115 strides.push_back(s);
116 }
117
118 assert(!strides.empty() && "Strides not found");
119
120 return strides;
121}
122
123static std::vector<std::int32_t> getWindowSize(const ::caffe2::OperatorDef &op,
124 const std::vector<mir::Operation::Output *> &inputs)
125{
126 int is_global_pooling = getSingleArgument(op, "global_pooling", 0);
127 bool has_custom_kernel_size =
128 hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
129 bool has_custom_kernels_size = hasArgument(op.arg(), "kernels");
130
131 int kernel_h(0), kernel_w(0);
132 if (is_global_pooling)
133 {
134 const auto &input_shape = inputs[0]->getShape();
135 assert(input_shape.rank() == 4 && "getWindowSize() inputs must be of rank 4");
136 kernel_h = input_shape.dim(2);
137 kernel_w = input_shape.dim(3);
138 }
139 else
140 {
141 if (has_custom_kernel_size)
142 {
143 kernel_h = getSingleArgument(op, "kernel_h", 0);
144 kernel_w = getSingleArgument(op, "kernel_w", 0);
145 }
146 else
147 {
148 if (has_custom_kernels_size)
149 {
150 // kernels order: h w
151 std::vector<int32_t> kernels;
152 auto kernels_arg = findArgumentByName(op.arg(), "kernels");
153 for (const auto &ker : kernels_arg.ints())
154 kernels.push_back(static_cast<int32_t>(ker));
155 assert(kernels.size() == 2);
156 kernel_h = kernels[0];
157 kernel_w = kernels[1];
158 }
159 else
160 {
161 kernel_h = kernel_w = getSingleArgument(op, "kernel", 0);
162 }
163 }
164 }
165 return {kernel_h, kernel_w};
166}
167
168//
169// Check functions
170//
171
172static void checkLayout(const OperatorDef &op)
173{
174 if (getSingleArgument(op, "order", "NCHW") != "NCHW")
175 throw std::runtime_error(op.type() + ": only 'NCHW' axis order is supported");
176}
177
178static void checkConvLikeOp(const ::caffe2::OperatorDef &op)
179{
180 checkLayout(op);
181
182 // Padding
183 bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r") ||
184 hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
185
186 if (has_custom_pad && hasArgument(op.arg(), "pad"))
187 throw std::runtime_error("Custom pad can't be combined with overall pad");
188
189 if (has_custom_pad && !(hasArgument(op.arg(), "pad_l") && hasArgument(op.arg(), "pad_r") &&
190 hasArgument(op.arg(), "pad_t") && hasArgument(op.arg(), "pad_b")))
191 throw std::runtime_error("If one custom pad specified - all custom pads must be specified");
192
193 // Kernel size
194 bool has_custom_kernel_size =
195 hasArgument(op.arg(), "kernel_h") || hasArgument(op.arg(), "kernel_w");
196
197 if (has_custom_kernel_size && hasArgument(op.arg(), "kernel"))
198 throw std::runtime_error("Custom kernel size can't be combined with overall kernel size");
199
200 if (has_custom_kernel_size &&
201 !(hasArgument(op.arg(), "kernel_h") && hasArgument(op.arg(), "kernel_w")))
202 throw std::runtime_error(
203 "If one custom kernel size specified - all custom kernel sizes must be specified");
204}
205
206static mir::TensorVariant createTensor(const OperatorDef &op)
207{
208 assert(hasArgument(op.arg(), "shape") && hasArgument(op.arg(), "values"));
209
210 const auto &shape = findArgumentByName(op.arg(), "shape");
211 const auto &values = findArgumentByName(op.arg(), "values");
212
213 mir::DataType element_type;
214 const void *src_data;
215 // if values on floats
216 if (!values.floats().empty())
217 {
218 element_type = mir::DataType::FLOAT32;
219 src_data = values.floats().data();
220 }
221 else
222 {
223 assert(!values.ints().empty());
224 if (op.type() == "GivenTensorInt64Fill")
225 {
226 element_type = mir::DataType::INT64;
227 }
228 else
229 {
230 element_type = mir::DataType::INT32;
231 }
232 src_data = values.ints().data();
233 }
234
235 mir::Shape tensor_shape(shape.ints_size());
236
237 for (int i = 0; i < shape.ints_size(); ++i)
238 {
239 tensor_shape.dim(i) = shape.ints(i);
240 }
241
242 return mir::TensorVariant({element_type, tensor_shape}, src_data);
243}
244
245//
246// Convert functions
247//
248
249std::vector<mir::Operation::Output *>
250Caffe2OpCreator::convertConstant(const std::vector<mir::Operation::Output *> &,
251 const ::caffe2::OperatorDef &op)
252{
253 // Constant may not contain any data if it is a fake input.
254 if (!hasArgument(op.arg(), "values"))
255 return {};
256
257 return {createOp<ops::ConstantOp>(createTensor(op))->getOutput(0)};
258}
259
260std::vector<mir::Operation::Output *>
261Caffe2OpCreator::convertAdd(const std::vector<mir::Operation::Output *> &inputs,
262 const ::caffe2::OperatorDef &op)
263{
264 assert(inputs.size() == 2);
265 auto lhs = inputs[0];
266 auto rhs = inputs[1];
267
268 if (getSingleArgument(op, "broadcast", 0) != 0)
269 {
270 // FIXME This only works when 'axis' == 1 and the second input is 1-D.
271 rhs = createOp<ops::ReshapeOp>(rhs, Shape{1, rhs->getShape().dim(0), 1, 1})->getOutput(0);
272 auto result = createOp<ops::AddOp>(lhs, rhs)->getOutput(0);
273 return {result};
274 }
275
276 auto result = createOp<ops::AddOp>(lhs, rhs)->getOutput(0);
277 return {result};
278}
279
280std::vector<mir::Operation::Output *>
281Caffe2OpCreator::convertAveragePool(const std::vector<mir::Operation::Output *> &inputs,
282 const OperatorDef &op)
283{
284 checkConvLikeOp(op);
285
286 assert(inputs.size() == 1);
287 auto input = inputs[0];
288
289 AvgPool2DOpAttributes attributes;
290 std::tie(attributes.padding_before, attributes.padding_after) = getPadding(op);
291 attributes.window = getWindowSize(op, inputs);
292 attributes.strides = getStrides(op);
293 attributes.include_pad = false;
294 attributes.data_format = DataFormat::NCHW;
295 auto result = createOp<ops::AvgPool2DOp>(input, attributes)->getOutput(0);
296 return {result};
297}
298
299std::vector<mir::Operation::Output *>
300Caffe2OpCreator::convertConv(const std::vector<mir::Operation::Output *> &inputs,
301 const ::caffe2::OperatorDef &op)
302{
303 // dilation order: h w (not used)
304 mir::Conv2DOpAttributes attributes;
305 attributes.strides = getStrides(op);
306 std::tie(attributes.padding_before, attributes.padding_after) = getPadding(op);
307 attributes.num_groups = getSingleArgument(op, "group", 1);
308 attributes.data_format = DataFormat::NCHW;
309
310 std::vector<std::size_t> perm{0, 2, 3, 1}; // OIHW -> OHWI
311 auto kernel = createOp<ops::TransposeOp>(inputs[1], perm)->getOutput(0);
312 auto result = createOp<ops::Conv2DOp>(inputs[0], kernel, attributes)->getOutput(0);
313
314 if (op.input_size() > 2)
315 {
316 auto bias = inputs[2];
317 bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
318 result = createOp<ops::AddOp>(result, bias)->getOutput(0);
319 }
320
321 return {result};
322}
323
324std::vector<mir::Operation::Output *>
325Caffe2OpCreator::convertConcat(const std::vector<mir::Operation::Output *> &inputs,
326 const ::caffe2::OperatorDef &op)
327{
328 checkLayout(op);
329
330 // `1` corresponds to the default (channels) axis.
331 int axis = getSingleArgument(op, "axis", 1);
332 auto result = createOp<ops::ConcatOp>(inputs, axis);
333 return {result->getOutput(0)};
334}
335
336std::vector<mir::Operation::Output *>
337Caffe2OpCreator::convertDropout(const std::vector<mir::Operation::Output *> &inputs,
338 const ::caffe2::OperatorDef &)
339{
340 // This is a no-op in inference mode.
341 return {inputs[0]};
342}
343
344std::vector<mir::Operation::Output *>
345Caffe2OpCreator::convertFC(const std::vector<mir::Operation::Output *> &inputs,
346 const ::caffe2::OperatorDef &op)
347{
348 for (auto &s : {"axis", "axis_w", "float16_compute"})
349 if (hasArgument(op.arg(), s))
350 throw std::runtime_error(std::string("FC: only default '") + s + "' value is supported");
351
352 const auto &input_shape = inputs[0]->getShape();
353 // Transform input into 2-D tensor by flattening axes
354 Shape shape{input_shape.dim(0), input_shape.numElements() / input_shape.dim(0)};
355
356 auto reshape = createOp<ops::ReshapeOp>(inputs[0], shape)->getOutput(0);
357 auto weights =
358 createOp<ops::TransposeOp>(inputs[1], std::vector<std::size_t>{1, 0})->getOutput(0);
359 auto result = createOp<ops::FullyConnectedOp>(reshape, weights)->getOutput(0);
360 result = createOp<ops::AddOp>(result, inputs[2])->getOutput(0);
361
362 return {result};
363}
364
365std::vector<mir::Operation::Output *>
366Caffe2OpCreator::convertMaxPool(const std::vector<mir::Operation::Output *> &inputs,
367 const OperatorDef &op)
368{
369 checkConvLikeOp(op);
370
371 assert(inputs.size() == 1);
372 auto input = inputs[0];
373
374 MaxPool2DOpAttributes attributes;
375 std::tie(attributes.padding_before, attributes.padding_after) = getPadding(op);
376 attributes.window = getWindowSize(op, inputs);
377 attributes.strides = getStrides(op);
378 attributes.data_format = DataFormat::NCHW;
379 auto result = createOp<ops::MaxPool2DOp>(input, attributes)->getOutput(0);
380 return {result};
381}
382
383std::vector<mir::Operation::Output *>
384Caffe2OpCreator::convertMul(const std::vector<mir::Operation::Output *> &inputs,
385 const ::caffe2::OperatorDef &op)
386{
387 assert(inputs.size() == 2);
388 auto lhs = inputs[0];
389 auto rhs = inputs[1];
390
391 if (getSingleArgument(op, "broadcast", 0) != 0)
392 {
393 // FIXME This only works when `axis` == 1 and the second input is 1-D.
394 rhs = createOp<ops::ReshapeOp>(rhs, Shape{1, rhs->getShape().dim(0), 1, 1})->getOutput(0);
395 auto result = createOp<ops::MulOp>(lhs, rhs)->getOutput(0);
396 return {result};
397 }
398
399 auto result = createOp<ops::MulOp>(lhs, rhs)->getOutput(0);
400 return {result};
401}
402
403std::vector<mir::Operation::Output *>
404Caffe2OpCreator::convertRelu(const std::vector<mir::Operation::Output *> &inputs)
405{
406 auto relu = createOp<ops::ReluOp>(inputs[0]);
407 return {relu->getOutput(0)};
408}
409
410std::vector<mir::Operation::Output *>
411Caffe2OpCreator::convertResizeNearest(const std::vector<mir::Operation::Output *> &inputs,
412 const ::caffe2::OperatorDef &op)
413{
414 std::vector<float> scales(4);
415 assert(inputs[0]->getShape().rank() == 4 && "only 4d tensors is supported");
416 // Assuming NCHW format.
417 scales[0] = 1.0f;
418 scales[1] = 1.0f;
419 scales[2] = getSingleArgument(op, "height_scale", 1.0f);
420 scales[3] = getSingleArgument(op, "width_scale", 1.0f);
421 auto result =
422 createOp<ops::ResizeOp>(inputs[0], ops::ResizeOp::ResizeMethod::nearestNeighbor, scales)
423 ->getOutput(0);
424 return {result};
425}
426
427std::vector<mir::Operation::Output *>
428Caffe2OpCreator::convertSigmoid(const std::vector<mir::Operation::Output *> &inputs)
429{
430 auto result = createOp<ops::SigmoidOp>(inputs[0]);
431 return {result->getOutput(0)};
432}
433
434std::vector<mir::Operation::Output *>
435Caffe2OpCreator::convertSoftmax(const std::vector<mir::Operation::Output *> &inputs,
436 const ::caffe2::OperatorDef &op)
437{
438 int axis = getSingleArgument(op, "axis", 1);
439 auto softmax = createOp<ops::SoftmaxOp>(inputs[0], axis);
440 return {softmax->getOutput(0)};
441}
442
443std::vector<mir::Operation::Output *>
444Caffe2OpCreator::convertSpatialBN(const std::vector<mir::Operation::Output *> &inputs,
445 const ::caffe2::OperatorDef &op)
446{
447 checkLayout(op);
448
449 // Sanity checks
450 if (op.input_size() != 5)
451 throw std::runtime_error(
452 "SpatialBN must have exactly 5 inputs ('sums' and 'sumsq' are not supported yet)");
453 if (getSingleArgument(op, "is_test", 1) != 1)
454 throw std::runtime_error("SpatialBN: only test mode supported");
455
456 // overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
457
458 auto scale_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
459 auto bias_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[2]->getNode());
460 auto mean_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[3]->getNode());
461 auto var_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[4]->getNode());
462 if (scale_op == nullptr || bias_op == nullptr || mean_op == nullptr || var_op == nullptr)
463 throw std::runtime_error(
464 "SpatialBN: non-constant 'scale', 'bias', 'mean' and 'var' inputs are not supported yet.");
465
466 const auto &scale_tensor = scale_op->getValue();
467 const auto &bias_tensor = bias_op->getValue();
468 const auto &mean_tensor = mean_op->getValue();
469 const auto &var_tensor = var_op->getValue();
470 float eps = getSingleArgument(op, "epsilon", 1e-5f);
471
472 // res1 = X - mean
473 Tensor<float> bias_data(mean_tensor);
474 for (auto &idx : ShapeRange(bias_data.getShape()))
475 bias_data.at(idx) *= -1;
476
477 auto mean = createOp<ops::ConstantOp>(mean_tensor)->getOutput(0);
478 mean = createOp<ops::ReshapeOp>(mean, Shape{1, mean->getShape().dim(0), 1, 1})->getOutput(0);
479 auto result = createOp<ops::AddOp>(inputs[0], mean)->getOutput(0);
480
481 // res2 = res1 * scale / (var + epsilon)
482 Tensor<float> multiplier(scale_tensor);
483 for (auto &idx : ShapeRange(scale_tensor.getShape()))
484 multiplier.at(idx) /= std::sqrt(*reinterpret_cast<float *>(var_tensor.at(idx)) + eps);
485 auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
486 scale = createOp<ops::ReshapeOp>(scale, Shape{1, scale->getShape().dim(0), 1, 1})->getOutput(0);
487 result = createOp<ops::MulOp>(result, scale)->getOutput(0);
488
489 // overall_res = res2 + bias
490 auto bias = createOp<ops::ConstantOp>(bias_tensor)->getOutput(0);
491 bias = createOp<ops::ReshapeOp>(bias, Shape{1, bias->getShape().dim(0), 1, 1})->getOutput(0);
492 result = createOp<ops::AddOp>(result, bias)->getOutput(0);
493
494 return {result};
495}
496
497std::vector<mir::Operation::Output *>
498Caffe2OpCreator::convertSum(const std::vector<mir::Operation::Output *> &inputs)
499{
500 auto result = createOp<ops::AddOp>(inputs[0], inputs[1])->getOutput(0);
501 for (int i = 2; i < static_cast<int>(inputs.size()); ++i)
502 {
503 result = createOp<ops::AddOp>(result, inputs[i])->getOutput(0);
504 }
505 return {result};
506}
507
508std::vector<mir::Operation::Output *>
509Caffe2OpCreator::convertClip(const std::vector<mir::Operation::Output *> &inputs,
510 const ::caffe2::OperatorDef &op)
511{
512
513 float max = getSingleArgument(op, "max", float(0));
514 float min = getSingleArgument(op, "min", float(0));
515
516 if (min != 0.0f)
517 throw std::runtime_error("Clip: min != 0 is not supported.");
518 if (max <= min)
519 throw std::runtime_error("Clip: max <= min is not supported.");
520 auto cap_relu = createOp<ops::CappedReluOp>(inputs[0], max);
521
522 return {cap_relu->getOutput(0)};
523}
524
525std::vector<mir::Operation::Output *>
526Caffe2OpCreator::convertReshape(const std::vector<mir::Operation::Output *> &inputs,
527 const ::caffe2::OperatorDef &)
528{
529 auto shape_op = dynamic_cast<mir::ops::ConstantOp *>(inputs[1]->getNode());
530 if (shape_op == nullptr)
531 throw std::runtime_error("Reshape: non-constant shape is not supported yet.");
532
533 const auto &shape_tensor = shape_op->getValue();
534
535 Tensor<int64_t> out_shape_tensor(shape_tensor);
536
537 ShapeRange range(out_shape_tensor.getShape());
538 std::vector<int32_t> shape_vec;
539 for (const auto &index : range)
540 {
541 shape_vec.push_back(static_cast<int32_t>(out_shape_tensor.at(index)));
542 }
543 Shape out_shape(shape_vec);
544
545 auto reshape = createOp<ops::ReshapeOp>(inputs[0], out_shape);
546
547 return {reshape->getOutput(0)};
548}
549
550} // namespace mir_caffe2
const Dimension & dim(uint32_t axis) const
Definition TensorShape.h:38
int32_t & dim(int32_t axis) noexcept
Definition Shape.h:47
const Shape & getShape() const
Definition Tensor.h:48
T at(const Index &id) const
Definition Tensor.h:31
std::vector< mir::Operation::Output * > convertMul(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertConstant(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertSpatialBN(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertAveragePool(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertMaxPool(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertClip(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertResizeNearest(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertSoftmax(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertRelu(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertConcat(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertAdd(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertReshape(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertSigmoid(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertFC(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertSum(const std::vector< mir::Operation::Output * > &inputs)
std::vector< mir::Operation::Output * > convertDropout(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
std::vector< mir::Operation::Output * > convertConv(const std::vector< mir::Operation::Output * > &inputs, const ::caffe2::OperatorDef &op)
const ::caffe2::Argument & findArgumentByName(RepArgument args, const std::string &name)
int getSingleArgument(const ::caffe2::OperatorDef &op, const std::string &argument_name, const int default_value)
bool hasArgument(RepArgument args, const std::string &name)
DataType
Definition DataType.h:27
std::vector< std::int32_t > window
Definition Attributes.h:42
std::vector< std::int32_t > padding_before
Definition Attributes.h:44
std::vector< std::int32_t > padding_after
Definition Attributes.h:45
std::vector< std::int32_t > strides
Definition Attributes.h:43
std::vector< std::int32_t > padding_after
Definition Attributes.h:33
std::vector< std::int32_t > strides
Definition Attributes.h:31
std::int32_t num_groups
Definition Attributes.h:34
std::vector< std::int32_t > padding_before
Definition Attributes.h:32
std::vector< std::int32_t > window
Definition Attributes.h:54
std::vector< std::int32_t > padding_after
Definition Attributes.h:57
std::vector< std::int32_t > strides
Definition Attributes.h:55
std::vector< std::int32_t > padding_before
Definition Attributes.h:56