ONE - On-device Neural Engine
FuseArithmeticOps.cpp
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "passes/optimizations/FuseArithmeticOps.h" // header of the pass under test (path assumed)
#include "Util.h"
#include "mir/Graph.h"
#include "mir/ops/AddOp.h"
#include "mir/ops/ConstantOp.h"
#include "mir/ops/Conv2DOp.h"
#include "mir/ops/MulOp.h"

#include <gtest/gtest.h>
#include <sstream>

using namespace nnc;
using namespace mir;

namespace
{

TEST(OptPass, fuseConvBiasScaleScaleBias)
{
  mir::Graph g;

  // Create graph: 'input->conv->bias->scale->scale->scale->bias'
  mir::TensorType input_type(mir::DataType::FLOAT32, Shape{1, 299, 299, 3});
  auto input = g.create<ops::InputOp>(input_type);
  auto conv_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10, 3, 3, 3}));
  auto conv = g.create<ops::Conv2DOp>(input->getOutput(0), conv_const->getOutput(0),
                                      mir::Conv2DOpAttributes()); // default strides/padding (argument assumed)
  auto bias1_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
  auto bias1 = g.create<ops::AddOp>(conv->getOutput(0), bias1_const->getOutput(0));
  auto scale1_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
  auto scale1 = g.create<ops::MulOp>(bias1->getOutput(0), scale1_const->getOutput(0));
  auto scale2_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
  auto scale2 = g.create<ops::MulOp>(scale1->getOutput(0), scale2_const->getOutput(0));
  auto scale3_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
  auto scale3 = g.create<ops::MulOp>(scale2->getOutput(0), scale3_const->getOutput(0));
  auto bias2_const = g.create<ops::ConstantOp>(TensorVariant(DataType::FLOAT32, {10}));
  g.create<ops::AddOp>(scale3->getOutput(0), bias2_const->getOutput(0));

  // Run the fusion pass and check that the resulting graph has the desired layout
  std::stringstream ss;
  DumpVisitor d(ss);
  FuseArithmeticOps pass; // pass under test (declaration assumed; it is run on the next line)
  pass.run(&g);
  g.accept(&d);
  // Assert only 'conv->bias' remains; the dump order of the input and constant
  // operations is not fixed, so any of the valid orderings is accepted
  ASSERT_TRUE("i_0.const_25.const_23.conv_26.b_24." == ss.str() ||
              "i_0.const_23.const_25.conv_26.b_24." == ss.str() ||
              "const_25.i_0.const_23.conv_26.b_24." == ss.str() ||
              "const_23.i_0.const_25.conv_26.b_24." == ss.str() ||
              "const_25.const_23.i_0.conv_26.b_24." == ss.str() ||
              "const_23.const_25.i_0.conv_26.b_24." == ss.str());
}

} // unnamed namespace
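
The assertion above relies on the algebraic identity that channel-wise bias (Add) and scale (Mul) operations following a convolution can be folded into the convolution kernel and a single bias, which is why only 'conv->bias' is expected to survive the pass. The following standalone sketch is not part of the test; the values and the tolerance check are illustrative assumptions. It verifies the identity for a single channel in scalar form:

// Scalar illustration of the fold performed by the fusion:
// ((x*w + b1) * s1 * s2 * s3) + b2  ==  x*(w*s1*s2*s3) + (b1*s1*s2*s3 + b2)
#include <cassert>
#include <cmath>

int main()
{
  const float x = 2.0f, w = 0.5f;                               // input and kernel value
  const float b1 = 0.1f, s1 = 1.5f, s2 = 0.7f, s3 = 2.0f, b2 = 0.3f;

  const float chained = ((x * w + b1) * s1 * s2 * s3) + b2;     // conv->bias->scale->scale->scale->bias
  const float fused_w = w * s1 * s2 * s3;                       // scales folded into the kernel
  const float fused_b = b1 * s1 * s2 * s3 + b2;                 // scales and biases folded into one bias
  const float fused = x * fused_w + fused_b;                    // conv->bias only

  assert(std::fabs(chained - fused) < 1e-6f);
  return 0;
}

Because the folded kernel and bias are just element-wise products and sums of the original constant operands, the fold can be computed entirely at compile time, leaving the single Conv2DOp and AddOp that the test expects in the dumped graph.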