ONE - On-device Neural Engine
MaxPool.h
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __NNFW_CKER_MAX_POOL_H__
#define __NNFW_CKER_MAX_POOL_H__

#include "cker/Shape.h"
#include "cker/Types.h"
#include "cker/Utils.h"
// Provides USE_NEON and the NEON intrinsics used in the uint8_t path below.
#include "cker/neon/neon_check.h"
#include "cker/eigen/Utils.h"

#include <Eigen/Core>

#include <algorithm>   // std::min, std::max
#include <cassert>     // assert
#include <cstdint>     // uint8_t
#include <cstring>     // memset
#include <limits>      // std::numeric_limits
#include <stdexcept>   // std::runtime_error
#include <type_traits> // std::is_integral, std::is_floating_point

namespace nnfw
{
namespace cker
{

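// Primary template: a fallback for element types that have no dedicated specialization.
// It compiles only for arithmetic types and always throws at run time; the float and
// uint8_t specializations below provide the actual implementations.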
template <typename T> void MaxPool(const PoolParams &, const Shape &, const T *, const Shape &, T *)
{
  static_assert(std::is_integral<T>::value || std::is_floating_point<T>::value,
                "cker::MaxPool : This function supports only integer or floating point");
  throw std::runtime_error("cker::MaxPool : Unsupported data type");
}

template <>
void MaxPool<float>(const PoolParams &params, const Shape &input_shape, const float *input_data,
                    const Shape &output_shape, float *output_data)
{
  assert(input_shape.DimensionsCount() == 4);
  assert(output_shape.DimensionsCount() == 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;

  const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
  auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
  // Prefill the output with the minimum representable float value.
  out_mat.setConstant(std::numeric_limits<float>::lowest());
  for (int b = 0; b < batches; ++b)
  {
    for (int h = 0; h < input_height; ++h)
    {
      for (int w = 0; w < input_width; ++w)
      {
        // (h_start, h_end) x (w_start, w_end) is the range of output positions
        // that the input element at (h, w) projects onto.
        int hpad = h + params.padding_values.height;
        int wpad = w + params.padding_values.width;
        int h_start =
          (hpad < params.filter_height) ? 0 : (hpad - params.filter_height) / stride_height + 1;
        int h_end = std::min(hpad / stride_height + 1, output_height);
        int w_start =
          (wpad < params.filter_width) ? 0 : (wpad - params.filter_width) / stride_width + 1;
        int w_end = std::min(wpad / stride_width + 1, output_width);
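        // Illustrative example (not in the original source): with filter_height = 3,
        // stride_height = 2 and padding_values.height = 1, an input row h = 4 gives
        // hpad = 5, h_start = (5 - 3) / 2 + 1 = 2 and h_end = min(5 / 2 + 1, output_height) = 3
        // (for output_height >= 3), so that row feeds only output row 2.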
        // Compute the elementwise max over the output range found above.
        for (int ph = h_start; ph < h_end; ++ph)
        {
          for (int pw = w_start; pw < w_end; ++pw)
          {
            int out_offset = NodeOffset(b, ph, pw, output_height, output_width);
            out_mat.col(out_offset) =
              out_mat.col(out_offset)
                .cwiseMax(in_mat.col(NodeOffset(b, h, w, input_height, input_width)));
          }
        }
      }
    }
  }
  const int flat_size = output_shape.FlatSize();
  for (int i = 0; i < flat_size; ++i)
  {
    output_data[i] = ActivationFunctionWithMinMax(output_data[i], params.float_activation_min,
                                                  params.float_activation_max);
  }
}

template <>
void MaxPool<uint8_t>(const PoolParams &params, const Shape &input_shape, const uint8_t *input_data,
                      const Shape &output_shape, uint8_t *output_data)
{
  // Here, and in other pooling ops, the inner loop runs down the depth dimension
  // to maintain locality of reference, avoid some recalculations, and allow loads
  // into NEON vector registers. Since the depth can be large, the temporary storage
  // would otherwise have to be arbitrarily large, so the work is divided into depth
  // tranches inside the batch loop.
  static constexpr int kPoolingAccTrancheSize = 256;
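  // Illustrative example (not in the original source): for depth = 600, the tranches
  // are 256, 256 and 88 channels, so acc[] never needs more than kPoolingAccTrancheSize
  // bytes regardless of the tensor's depth.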

  assert(params.quantized_activation_min <= params.quantized_activation_max);
  assert(input_shape.DimensionsCount() == 4);
  assert(output_shape.DimensionsCount() == 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;

  uint8_t acc[kPoolingAccTrancheSize];
  for (int batch = 0; batch < batches; ++batch)
  {
    // We proceed through the depth in tranches (see comment above). The
    // depth_base is the depth at the beginning of the tranche. The
    // tranche_depth is the depth dimension of the tranche.
    for (int depth_base = 0; depth_base < depth; depth_base += kPoolingAccTrancheSize)
    {
      const int tranche_depth = std::min(depth - depth_base, kPoolingAccTrancheSize);
      for (int out_y = 0; out_y < output_height; ++out_y)
      {
        for (int out_x = 0; out_x < output_width; ++out_x)
        {
          const int in_x_origin = (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin = (out_y * stride_height) - params.padding_values.height;
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin);
          memset(acc, 0, tranche_depth * sizeof(acc[0]));
          const uint8_t *input_ptr =
            input_data + depth_base +
            depth * (in_x_origin + input_width * (in_y_origin + input_height * batch));
          for (int fy = filter_y_start; fy < filter_y_end; fy++)
          {
            const uint8_t *input_row_ptr = input_ptr + depth * (fy * input_width + filter_x_start);
            for (int fx = filter_x_start; fx < filter_x_end; fx++)
            {
              const uint8_t *input_channel_ptr = input_row_ptr;
              int channel = 0;
#ifdef USE_NEON
              for (; channel <= tranche_depth - 16; channel += 16)
              {
                uint8x16_t acc_reg = vld1q_u8(acc + channel);
                uint8x16_t input_reg = vld1q_u8(input_channel_ptr);
                input_channel_ptr += 16;
                acc_reg = vmaxq_u8(acc_reg, input_reg);
                vst1q_u8(acc + channel, acc_reg);
              }

              for (; channel <= tranche_depth - 8; channel += 8)
              {
                uint8x8_t acc_reg = vld1_u8(acc + channel);
                uint8x8_t input_reg = vld1_u8(input_channel_ptr);
                input_channel_ptr += 8;
                acc_reg = vmax_u8(acc_reg, input_reg);
                vst1_u8(acc + channel, acc_reg);
              }
#endif
              for (; channel < tranche_depth; ++channel)
              {
                acc[channel] = std::max(acc[channel], *input_channel_ptr++);
              }
              input_row_ptr += depth;
            }
          }
          uint8_t *output_ptr = output_data + Offset(output_shape, batch, out_y, out_x, depth_base);
          int channel = 0;
#ifdef USE_NEON
          for (; channel <= tranche_depth - 16; channel += 16)
          {
            uint8x16_t a = vld1q_u8(acc + channel);
            a = vminq_u8(a, vdupq_n_u8(params.quantized_activation_max));
            a = vmaxq_u8(a, vdupq_n_u8(params.quantized_activation_min));
            vst1q_u8(output_ptr + channel, a);
          }
          for (; channel <= tranche_depth - 8; channel += 8)
          {
            uint8x8_t a = vld1_u8(acc + channel);
            a = vmin_u8(a, vdup_n_u8(params.quantized_activation_max));
            a = vmax_u8(a, vdup_n_u8(params.quantized_activation_min));
            vst1_u8(output_ptr + channel, a);
          }
#endif
          for (; channel < tranche_depth; ++channel)
          {
            uint8_t a = acc[channel];
            a = std::max<uint8_t>(a, params.quantized_activation_min);
            a = std::min<uint8_t>(a, params.quantized_activation_max);
            output_ptr[channel] = static_cast<uint8_t>(a);
          }
        }
      }
    }
  }
}

} // namespace cker
} // namespace nnfw

#endif // __NNFW_CKER_MAX_POOL_H__
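
A minimal usage sketch, not part of MaxPool.h: it runs the float specialization on a 1x4x4x1 NHWC tensor with a 2x2 filter and stride 2. The PoolParams fields are the ones referenced above; the initializer-list Shape constructor and the "cker/operation/MaxPool.h" include path are assumptions about the surrounding cker library.

#include "cker/operation/MaxPool.h" // assumed include path for this header

#include <limits>
#include <vector>

void MaxPoolFloatSketch()
{
  nnfw::cker::PoolParams params{};
  params.filter_height = 2;
  params.filter_width = 2;
  params.stride_height = 2;
  params.stride_width = 2;
  params.padding_values.height = 0;
  params.padding_values.width = 0;
  // No fused activation: leave the full float range.
  params.float_activation_min = std::numeric_limits<float>::lowest();
  params.float_activation_max = std::numeric_limits<float>::max();

  // Shapes are NHWC; the initializer-list constructor is assumed here.
  const nnfw::cker::Shape input_shape{1, 4, 4, 1};
  const nnfw::cker::Shape output_shape{1, 2, 2, 1};

  std::vector<float> input(16, 0.0f);
  input[5] = 7.0f; // row 1, col 1 -> inside the top-left 2x2 window
  std::vector<float> output(4, 0.0f);

  nnfw::cker::MaxPool<float>(params, input_shape, input.data(), output_shape, output.data());
  // output[0] == 7.0f; the other three outputs are 0.0f.
}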