ONE - On-device Neural Engine
DepthwiseConvolutionLayer.h
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __ONERT_KERNEL_CPU_DEPTHWISECONVOLUTIONLAYER_H__
#define __ONERT_KERNEL_CPU_DEPTHWISECONVOLUTIONLAYER_H__

#include <backend/IPortableTensor.h>
#include "OperationUtils.h"
#include "../ExternalContext.h"
#include "../Tensor.h"

#include <exec/IFunction.h>

namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{

class DepthwiseConvolutionLayer : public ::onert::exec::IFunction
{
public:
  DepthwiseConvolutionLayer() = default;

public:
  void convFloat32();

  void convQ8uPerTensor();
  void convQ8uPerChannel();

  void convQ8i();
  void convQ8iHybridPerChannel();

  void configure(const IPortableTensor *input, const IPortableTensor *kernel,
                 const IPortableTensor *bias, const uint32_t paddingLeft,
                 const uint32_t paddingRight, const uint32_t paddingTop,
                 const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
                 const uint32_t multiplier, const uint32_t dilationWidth,
                 const uint32_t dilationHeight, const ir::Activation activation,
                 IPortableTensor *output, const std::shared_ptr<ExternalContext> &external_context);

  void run() override;

private:
  void prepareF32();
  void prepareQ8i();
  void prepareQ8uPerChannel();
  void prepareQ8iHybridPerChannel();
  void ensureQ8iHybridPerChannel();

protected:
  const IPortableTensor *_input{nullptr};
  const IPortableTensor *_kernel{nullptr};
  const IPortableTensor *_bias{nullptr};
  IPortableTensor *_output{nullptr};

  uint32_t _paddingLeft{0};
  uint32_t _paddingTop{0};
  uint32_t _paddingRight{0};
  uint32_t _paddingBottom{0};

  uint32_t _strideWidth{0};
  uint32_t _strideHeight{0};

  uint32_t _multiplier{0};

  uint32_t _dilationWidth{1};
  uint32_t _dilationHeight{1};

  ir::Activation _activation{ir::Activation::NONE};

  bool _use_padded_filter{false};
  std::unique_ptr<Tensor> _padded_filter{nullptr};
  std::unique_ptr<Tensor> _filter_buffers{nullptr};

private:
  std::shared_ptr<ExternalContext> _external_context;

  bool _prepared{false};

  // Per channel output multiplier and shift.
  std::vector<int32_t> _per_channel_output_multiplier;
  std::vector<int> _per_channel_output_shift;

  // For hybrid
  bool _is_hybrid{false};
  std::vector<int8_t> _input_quantized;
  std::vector<float> _input_scaling_factors;
  std::vector<int32_t> _input_offsets;
};

} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert

#endif // __ONERT_KERNEL_CPU_DEPTHWISECONVOLUTIONLAYER_H__
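
How this kernel's API fits together: configure() is called once with the input/kernel/bias/output tensors and all convolution parameters, and run() is called for every execution, dispatching to convFloat32(), convQ8uPerTensor()/convQ8uPerChannel(), convQ8i(), or convQ8iHybridPerChannel() according to the tensor data types. The snippet below is a minimal hand-written sketch of that call sequence; in practice the tensors, activation, and ExternalContext are supplied by the CPU backend's kernel generator, and the concrete parameter values here are placeholders.

// Illustrative sketch only: `input`, `kernel`, `bias`, `output` and
// `external_context` are assumed to be valid objects owned by the backend.
using onert::backend::cpu::ops::DepthwiseConvolutionLayer;

auto layer = std::make_unique<DepthwiseConvolutionLayer>();
layer->configure(input, kernel, bias,
                 /*paddingLeft=*/1, /*paddingRight=*/1,
                 /*paddingTop=*/1, /*paddingBottom=*/1,
                 /*strideW=*/1, /*strideH=*/1,
                 /*multiplier=*/1,
                 /*dilationWidth=*/1, /*dilationHeight=*/1,
                 onert::ir::Activation::NONE,
                 output, external_context);
layer->run(); // picks the conv* variant that matches the configured tensor types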
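A note on the per-channel members above: in the quantized paths, each output channel's effective rescale factor (roughly input_scale * filter_scale[c] / output_scale) must be applied in integer arithmetic, which is why it is stored as a fixed-point multiplier plus shift pair. The helper below is a minimal sketch of that decomposition in the style of TFLite's QuantizeMultiplier; it is illustrative and not necessarily the exact routine this backend's OperationUtils provides.

#include <cmath>
#include <cstdint>

// Illustrative sketch: split a real multiplier into a Q31 fixed-point multiplier
// and a power-of-two shift, so that x * real_multiplier is approximately a
// rounding right shift of x * quantized_multiplier by (31 - shift) bits.
void quantizeMultiplier(double real_multiplier, int32_t *quantized_multiplier, int *shift)
{
  if (real_multiplier == 0.0)
  {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
  const double q = std::frexp(real_multiplier, shift); // real = q * 2^shift, 0.5 <= q < 1
  auto q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) // rounding pushed q to exactly 1.0; renormalize
  {
    q_fixed /= 2;
    ++(*shift);
  }
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}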