ONE - On-device Neural Engine
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
PoolLayer.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "PoolLayer.h"
18
21
22#include <unordered_map>
23
25{
26
27namespace
28{
29template <typename T>
30void avgPool2D(const nnfw::cker::PoolParams &params, const IPortableTensor *input,
31 IPortableTensor *output)
32{
33 nnfw::cker::AveragePool<T>(params, getShape(input), getBuffer<T>(input), getShape(output),
34 getBuffer<T>(output));
35}
36
37template <typename T>
38void maxPool2D(const nnfw::cker::PoolParams &params, const IPortableTensor *input,
39 IPortableTensor *output)
40{
41 nnfw::cker::MaxPool<T>(params, getShape(input), getBuffer<T>(input), getShape(output),
42 getBuffer<T>(output));
43}
44
45template <typename T>
46std::function<void(const IPortableTensor *, IPortableTensor *)>
47generateKernelGeneric(const nnfw::cker::PoolParams &params, PoolType op_type)
48{
49 if (op_type == PoolType::kAvg)
50 {
51 return std::bind(&avgPool2D<T>, params, std::placeholders::_1, std::placeholders::_2);
52 }
53 else if (op_type == PoolType::kMax)
54 {
55 return std::bind(&maxPool2D<T>, params, std::placeholders::_1, std::placeholders::_2);
56 }
57 else
58 {
59 throw std::runtime_error{"Pool: unsupported pool type"};
60 }
61}
62} // namespace
63
// Constructs an unconfigured layer; configure() must be called before run(),
// since _kernel is empty until then.
PoolLayer::PoolLayer() : _input(nullptr), _output(nullptr), _kernel()
{
  // DO NOTHING
}
68
// Declares a local nnfw::cker::PoolParams named `op_params` and fills it from
// the surrounding configure() arguments. Activation bounds are zeroed here and
// overwritten per data type afterwards.
// FIX: the padding casts were (int8_t), which silently truncates any padding
// greater than 127; cker's PaddingValues stores int16 fields, so cast to
// int16_t instead.
#define POOLING_PARAMETERS                       \
  nnfw::cker::PoolParams op_params;              \
  op_params.stride_height = strideHeight;        \
  op_params.stride_width = strideWidth;          \
  op_params.filter_height = kernelHeight;        \
  op_params.filter_width = kernelWidth;          \
  op_params.padding_values.height = (int16_t)paddingTop; \
  op_params.padding_values.width = (int16_t)paddingLeft; \
  op_params.float_activation_min = 0;            \
  op_params.float_activation_max = 0;            \
  op_params.quantized_activation_min = 0;        \
  op_params.quantized_activation_max = 0;
81
82void PoolLayer::configure(const IPortableTensor *input, const uint32_t paddingLeft, const uint32_t,
83 const uint32_t paddingTop, const uint32_t, const uint32_t strideWidth,
84 const uint32_t strideHeight, const uint32_t kernelWidth,
85 const uint32_t kernelHeight, const ir::Activation activation,
86 IPortableTensor *output, const PoolType op_type)
87{
88 assert(input != nullptr);
89 assert(output != nullptr);
90
91 _input = input;
92 _output = output;
93
95
96 switch (_input->data_type())
97 {
98 case OperandType::FLOAT32:
99 {
100 float output_activation_min = 0;
101 float output_activation_max = 0;
102 CalculateActivationRange<float>(activation, &output_activation_min, &output_activation_max);
103 op_params.float_activation_min = output_activation_min;
104 op_params.float_activation_max = output_activation_max;
105
106 _kernel = generateKernelGeneric<float>(op_params, op_type);
107 break;
108 }
109 case OperandType::QUANT_UINT8_ASYMM:
110 {
111 int32_t output_activation_min = 0;
112 int32_t output_activation_max = 0;
113 CalculateActivationRangeQuantized(activation, _output, &output_activation_min,
114 &output_activation_max);
115 op_params.quantized_activation_min = output_activation_min;
116 op_params.quantized_activation_max = output_activation_max;
117 _kernel = generateKernelGeneric<uint8_t>(op_params, op_type);
118 break;
119 }
120 case OperandType::QUANT_INT8_ASYMM:
121 {
122 int32_t output_activation_min = 0;
123 int32_t output_activation_max = 0;
124 CalculateActivationRangeQuantized(activation, _output, &output_activation_min,
125 &output_activation_max);
126 op_params.quantized_activation_min = output_activation_min;
127 op_params.quantized_activation_max = output_activation_max;
128 _kernel = generateKernelGeneric<int8_t>(op_params, op_type);
129 break;
130 }
131 default:
132 throw std::runtime_error{"Pool: unsupported data type"};
133 }
134}
135
136void PoolLayer::run() { _kernel(_input, _output); }
137
// FIX: the macro defined above is POOLING_PARAMETERS, but the old line undef'd
// a nonexistent AVGPOOLING_PARAMETERS, leaking POOLING_PARAMETERS past this
// translation unit's logical scope.
#undef POOLING_PARAMETERS
139
140} // namespace onert::backend::cpu::ops
A tensor class that is portable for other backends.
ir::DataType data_type() const override final
const IPortableTensor * _input
Definition PoolLayer.h:51
void configure(const IPortableTensor *input, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t kernelWidth, const uint32_t kernelHeight, const ir::Activation activation, IPortableTensor *output, const PoolType op_type)
Definition PoolLayer.cc:82
#define POOLING_PARAMETERS
Definition PoolLayer.cc:69
nnfw::cker::Shape getShape(const IPortableTensor *tensor)
void CalculateActivationRangeQuantized(ir::Activation activation, const IPortableTensor *output, int32_t *act_min, int32_t *act_max)