ONE - On-device Neural Engine
AveragePool.h
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __NNFW_CKER_AVERAGE_POOL_H__
#define __NNFW_CKER_AVERAGE_POOL_H__

#include "cker/neon/neon_check.h" // USE_NEON and the NEON intrinsics used below
#include "cker/eigen/Utils.h"
#include "cker/Shape.h"
#include "cker/Types.h"
#include "cker/Utils.h"

#include <Eigen/Core>

#include <algorithm>   // std::min, std::max
#include <cassert>     // assert
#include <cstdint>     // fixed-width integer types
#include <cstring>     // memset
#include <stdexcept>   // std::runtime_error
#include <type_traits> // std::is_integral, std::is_floating_point

namespace nnfw
{
namespace cker
{

// TODO Change to apply neon for this function if it is faster
template <typename T>
void AveragePool(const PoolParams &, const Shape &, const T *, const Shape &, T *)
{
  static_assert(std::is_integral<T>::value || std::is_floating_point<T>::value,
                "cker::AveragePool : This function supports only integer or floating point");
  throw std::runtime_error("cker::AveragePool : Unsupported data type");
}
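
// Note: only the specializations below (float, uint8_t and int8_t) are
// implemented; instantiating the primary template with any other arithmetic
// type compiles but reports the unsupported data type at runtime via the
// exception above.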

template <>
void AveragePool<float>(const PoolParams &params, const Shape &input_shape, const float *input_data,
                        const Shape &output_shape, float *output_data)
{
  assert(input_shape.DimensionsCount() == 4);
  assert(output_shape.DimensionsCount() == 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;

  // TODO(benoitjacob) make this a proper reference impl without Eigen!
  const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
  auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
  // TODO(benoitjacob) get rid of the dynamic memory allocation here!
  Eigen::VectorXf out_count(out_mat.cols());
  out_count.setZero();
  // Prefill the output to 0.
  out_mat.setZero();
  for (int b = 0; b < batches; ++b)
  {
    for (int h = 0; h < input_height; ++h)
    {
      for (int w = 0; w < input_width; ++w)
      {
        // (h_start, h_end) * (w_start, w_end) is the range that the input
        // vector projects to.
        int hpad = h + params.padding_values.height;
        int wpad = w + params.padding_values.width;
        int h_start =
          (hpad < params.filter_height) ? 0 : (hpad - params.filter_height) / stride_height + 1;
        int h_end = std::min(hpad / stride_height + 1, output_height);
        int w_start =
          (wpad < params.filter_width) ? 0 : (wpad - params.filter_width) / stride_width + 1;
        int w_end = std::min(wpad / stride_width + 1, output_width);
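        // For example, with filter_height = 2, stride_height = 2 and zero padding,
        // input row h = 3 gives hpad = 3, so h_start = (3 - 2) / 2 + 1 = 1 and
        // h_end = min(3 / 2 + 1, output_height) = 2: row 3 contributes only to
        // output row 1, as expected for a 2x2, stride-2 pool.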
        // compute elementwise sum
        for (int ph = h_start; ph < h_end; ++ph)
        {
          for (int pw = w_start; pw < w_end; ++pw)
          {
            int out_offset = NodeOffset(b, ph, pw, output_height, output_width);
            out_mat.col(out_offset) += in_mat.col(NodeOffset(b, h, w, input_height, input_width));
            out_count(out_offset)++;
          }
        }
      }
    }
  }
  // Divide the output by the actual number of elements being averaged over
  assert(out_count.minCoeff() > 0);
  out_mat.array().rowwise() /= out_count.transpose().array();

  const int flat_size = output_shape.FlatSize();
  for (int i = 0; i < flat_size; ++i)
  {
    output_data[i] = ActivationFunctionWithMinMax(output_data[i], params.float_activation_min,
                                                  params.float_activation_max);
  }
}
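
// Illustrative usage sketch for the float specialization above. It assumes an
// NHWC float tensor and that Shape can be constructed from a dimension list
// (as with an initializer-list constructor); `input` and `output` stand for
// caller-provided buffers of matching size.
//
//   nnfw::cker::PoolParams op_params;
//   op_params.filter_height = 2;
//   op_params.filter_width = 2;
//   op_params.stride_height = 2;
//   op_params.stride_width = 2;
//   op_params.padding_values.height = 0;
//   op_params.padding_values.width = 0;
//   op_params.float_activation_min = std::numeric_limits<float>::lowest();
//   op_params.float_activation_max = std::numeric_limits<float>::max();
//   // Average a 1x4x4x1 input down to a 1x2x2x1 output.
//   nnfw::cker::AveragePool<float>(op_params, Shape{1, 4, 4, 1}, input.data(),
//                                  Shape{1, 2, 2, 1}, output.data());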

inline void AveragePool16(const PoolParams &params, const Shape &input_shape,
                          const uint8_t *input_data, const Shape &output_shape,
                          uint8_t *output_data)
{
  // Here, and in other pooling ops, in order to maintain locality of reference,
  // to minimize some recalculations, and to load into NEON vector registers, we
  // use an inner loop down the depth. Since depths can be large and hence we
  // would need arbitrarily large temporary storage, we divide the work up into
  // depth tranches just within the batch loop.
  static constexpr int kPoolingAccTrancheSize = 256;
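  // For example, a depth of 300 channels is processed as two tranches of 256 and
  // 44 channels, so the accumulator below never needs more than
  // kPoolingAccTrancheSize entries.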

  assert(params.quantized_activation_min <= params.quantized_activation_max);
  assert(input_shape.DimensionsCount() == 4);
  assert(output_shape.DimensionsCount() == 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;

  uint16_t acc[kPoolingAccTrancheSize];
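  // A 16-bit accumulator is sufficient here because this path is only selected
  // for pooling windows of at most 16 * 16 taps (see AveragePool<uint8_t> below):
  // 256 taps * 255 max input = 65280, which still fits in uint16_t.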
  for (int batch = 0; batch < batches; ++batch)
  {
    // We proceed through the depth in tranches (see comment above). The
    // depth_base is the depth at the beginning of the tranche. The
    // tranche_depth is the depth dimension of the tranche.
    for (int depth_base = 0; depth_base < depth; depth_base += kPoolingAccTrancheSize)
    {
      const int tranche_depth = std::min(depth - depth_base, kPoolingAccTrancheSize);
      for (int out_y = 0; out_y < output_height; ++out_y)
      {
        for (int out_x = 0; out_x < output_width; ++out_x)
        {
          const int in_x_origin = (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin = (out_y * stride_height) - params.padding_values.height;
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin);
          const int filter_count =
            (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
          memset(acc, 0, tranche_depth * sizeof(acc[0]));
          const uint8_t *input_ptr =
            input_data + depth_base +
            depth * (in_x_origin + input_width * (in_y_origin + input_height * batch));
          for (int fy = filter_y_start; fy < filter_y_end; fy++)
          {
            const uint8_t *input_row_ptr = input_ptr + depth * (fy * input_width + filter_x_start);
            for (int fx = filter_x_start; fx < filter_x_end; fx++)
            {
              const uint8_t *input_channel_ptr = input_row_ptr;
              int channel = 0;
#ifdef USE_NEON
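              // NEON fast path: widen and accumulate 16 uint8 channels at a time
              // into two uint16x8 accumulators (vaddw_u8), then 8 at a time; any
              // remaining channels are handled by the scalar loop after the #endif.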
              for (; channel <= tranche_depth - 16; channel += 16)
              {
                uint16x8_t acc_reg[2];
                for (int i = 0; i < 2; i++)
                {
                  acc_reg[i] = vld1q_u16(acc + channel + 8 * i);
                }
                uint8x16_t input_reg = vld1q_u8(input_channel_ptr);
                input_channel_ptr += 16;
                acc_reg[0] = vaddw_u8(acc_reg[0], vget_low_u8(input_reg));
                acc_reg[1] = vaddw_u8(acc_reg[1], vget_high_u8(input_reg));
                for (int i = 0; i < 2; i++)
                {
                  vst1q_u16(acc + channel + 8 * i, acc_reg[i]);
                }
              }
              for (; channel <= tranche_depth - 8; channel += 8)
              {
                uint16x8_t acc_reg = vld1q_u16(acc + channel);
                uint8x8_t input_reg = vld1_u8(input_channel_ptr);
                input_channel_ptr += 8;
                acc_reg = vaddw_u8(acc_reg, input_reg);
                vst1q_u16(acc + channel, acc_reg);
              }
#endif
              for (; channel < tranche_depth; ++channel)
              {
                acc[channel] += *input_channel_ptr++;
              }
              input_row_ptr += depth;
            }
          }
          uint8_t *output_ptr = output_data + Offset(output_shape, batch, out_y, out_x, depth_base);
          int channel = 0;
#ifdef USE_NEON
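// Specializing on fixed filter_count values (9 and 15 here) makes the divisor a
// compile-time constant, which compilers typically strength-reduce to a multiply
// and shift; other window sizes fall through to the generic loop below.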
#define AVGPOOL_DIVIDING_BY(FILTER_COUNT)                               \
  if (filter_count == FILTER_COUNT)                                     \
  {                                                                     \
    for (; channel <= tranche_depth - 8; channel += 8)                  \
    {                                                                   \
      uint16_t buf[8];                                                  \
      for (int i = 0; i < 8; i++)                                       \
      {                                                                 \
        buf[i] = (acc[channel + i] + FILTER_COUNT / 2) / FILTER_COUNT;  \
      }                                                                 \
      uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));                      \
      buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max)); \
      buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min)); \
      vst1_u8(output_ptr + channel, buf8);                              \
    }                                                                   \
  }
          AVGPOOL_DIVIDING_BY(9)
          AVGPOOL_DIVIDING_BY(15)
#undef AVGPOOL_DIVIDING_BY
          for (; channel <= tranche_depth - 8; channel += 8)
          {
            uint16_t buf[8];
            for (int i = 0; i < 8; i++)
            {
              buf[i] = (acc[channel + i] + filter_count / 2) / filter_count;
            }
            uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));
            buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max));
            buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min));
            vst1_u8(output_ptr + channel, buf8);
          }
#endif
          for (; channel < tranche_depth; ++channel)
          {
            uint8_t a = (acc[channel] + filter_count / 2) / filter_count;
            a = std::max<uint16_t>(a, params.quantized_activation_min);
            a = std::min<uint16_t>(a, params.quantized_activation_max);
            output_ptr[channel] = static_cast<uint8_t>(a);
          }
        }
      }
    }
  }
}
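
// AveragePool32 implements the same algorithm with 32-bit accumulators. It is
// selected by AveragePool<uint8_t> below for pooling windows larger than
// 16x16 taps, where the 16-bit sums used above could overflow.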

inline void AveragePool32(const PoolParams &params, const Shape &input_shape,
                          const uint8_t *input_data, const Shape &output_shape,
                          uint8_t *output_data)
{
  // Here, and in other pooling ops, in order to maintain locality of reference,
  // to minimize some recalculations, and to load into NEON vector registers, we
  // use an inner loop down the depth. Since depths can be large and hence we
  // would need arbitrarily large temporary storage, we divide the work up into
  // depth tranches just within the batch loop.
  static constexpr int kPoolingAccTrancheSize = 256;

  assert(params.quantized_activation_min <= params.quantized_activation_max);
  assert(input_shape.DimensionsCount() == 4);
  assert(output_shape.DimensionsCount() == 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;

  uint32_t acc[kPoolingAccTrancheSize];
  for (int batch = 0; batch < batches; ++batch)
  {
    // We proceed through the depth in tranches (see comment above). The
    // depth_base is the depth at the beginning of the tranche. The
    // tranche_depth is the depth dimension of the tranche.
    for (int depth_base = 0; depth_base < depth; depth_base += kPoolingAccTrancheSize)
    {
      const int tranche_depth = std::min(depth - depth_base, kPoolingAccTrancheSize);
      for (int out_y = 0; out_y < output_height; ++out_y)
      {
        for (int out_x = 0; out_x < output_width; ++out_x)
        {
          const int in_x_origin = (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin = (out_y * stride_height) - params.padding_values.height;
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin);
          const int filter_count =
            (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
          memset(acc, 0, tranche_depth * sizeof(acc[0]));
          const uint8_t *input_ptr =
            input_data + depth_base +
            depth * (in_x_origin + input_width * (in_y_origin + input_height * batch));
          for (int fy = filter_y_start; fy < filter_y_end; fy++)
          {
            const uint8_t *input_row_ptr = input_ptr + depth * (fy * input_width + filter_x_start);
            for (int fx = filter_x_start; fx < filter_x_end; fx++)
            {
              const uint8_t *input_channel_ptr = input_row_ptr;
              int channel = 0;
#ifdef USE_NEON
              for (; channel <= tranche_depth - 16; channel += 16)
              {
                uint16x4_t acc_reg[4];
                uint8x16_t input_reg = vld1q_u8(input_channel_ptr);
                input_channel_ptr += 16;
                acc_reg[0] = vget_low_u16(vmovl_u8(vget_low_u8(input_reg)));
                acc_reg[1] = vget_high_u16(vmovl_u8(vget_low_u8(input_reg)));
                acc_reg[2] = vget_low_u16(vmovl_u8(vget_high_u8(input_reg)));
                acc_reg[3] = vget_high_u16(vmovl_u8(vget_high_u8(input_reg)));
                for (int i = 0; i < 4; i++)
                {
                  vst1q_u32(acc + channel + 4 * i,
                            vaddw_u16(vld1q_u32(acc + channel + 4 * i), acc_reg[i]));
                }
              }
              for (; channel <= tranche_depth - 8; channel += 8)
              {
                uint16x4_t acc_reg[2];
                uint16x8_t input_reg = vmovl_u8(vld1_u8(input_channel_ptr));
                input_channel_ptr += 8;
                acc_reg[0] = vget_low_u16(input_reg);
                acc_reg[1] = vget_high_u16(input_reg);
                for (int i = 0; i < 2; i++)
                {
                  vst1q_u32(acc + channel + 4 * i,
                            vaddw_u16(vld1q_u32(acc + channel + 4 * i), acc_reg[i]));
                }
              }
#endif
              for (; channel < tranche_depth; ++channel)
              {
                acc[channel] += *input_channel_ptr++;
              }
              input_row_ptr += depth;
            }
          }
          uint8_t *output_ptr = output_data + Offset(output_shape, batch, out_y, out_x, depth_base);
          int channel = 0;
#ifdef USE_NEON
#define AVGPOOL_DIVIDING_BY(FILTER_COUNT)                               \
  if (filter_count == FILTER_COUNT)                                     \
  {                                                                     \
    for (; channel <= tranche_depth - 8; channel += 8)                  \
    {                                                                   \
      uint16_t buf[8];                                                  \
      for (int i = 0; i < 8; i++)                                       \
      {                                                                 \
        buf[i] = (acc[channel + i] + FILTER_COUNT / 2) / FILTER_COUNT;  \
      }                                                                 \
      uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));                      \
      buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max)); \
      buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min)); \
      vst1_u8(output_ptr + channel, buf8);                              \
    }                                                                   \
  }
          AVGPOOL_DIVIDING_BY(9)
          AVGPOOL_DIVIDING_BY(15)
#undef AVGPOOL_DIVIDING_BY
          for (; channel <= tranche_depth - 8; channel += 8)
          {
            uint16_t buf[8];
            for (int i = 0; i < 8; i++)
            {
              buf[i] = (acc[channel + i] + filter_count / 2) / filter_count;
            }
            uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));
            buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max));
            buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min));
            vst1_u8(output_ptr + channel, buf8);
          }
#endif
          for (; channel < tranche_depth; ++channel)
          {
            uint16_t a = (acc[channel] + filter_count / 2) / filter_count;
            a = std::max<uint16_t>(a, params.quantized_activation_min);
            a = std::min<uint16_t>(a, params.quantized_activation_max);
            output_ptr[channel] = static_cast<uint8_t>(a);
          }
        }
      }
    }
  }
}

template <>
void AveragePool<uint8_t>(const PoolParams &params, const Shape &input_shape,
                          const uint8_t *input_data, const Shape &output_shape,
                          uint8_t *output_data)
{
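  // Dispatch on the pooling window size: with uint8_t inputs (at most 255), a
  // window of up to 16 * 16 = 256 taps sums to at most 256 * 255 = 65280, which
  // fits the 16-bit accumulators of AveragePool16. Anything larger (e.g.
  // 17 * 17 * 255 = 73695) could overflow uint16_t, so AveragePool32 is used.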
  if (params.filter_height * params.filter_width > 16 * 16)
  {
    AveragePool32(params, input_shape, input_data, output_shape, output_data);
  }
  else
  {
    AveragePool16(params, input_shape, input_data, output_shape, output_data);
  }
}

template <>
void AveragePool<int8_t>(const PoolParams &params, const Shape &input_shape,
                         const int8_t *input_data, const Shape &output_shape, int8_t *output_data)
{
  // Here, and in other pooling ops, in order to maintain locality of reference,
  // to minimize some recalculations, and to load into NEON vector registers, we
  // use an inner loop down the depth. Since depths can be large and hence we
  // would need arbitrarily large temporary storage, we divide the work up into
  // depth tranches just within the batch loop.
  static constexpr int kPoolingAccTrancheSize = 256;

  assert(params.quantized_activation_min <= params.quantized_activation_max);
  assert(input_shape.DimensionsCount() == 4);
  assert(output_shape.DimensionsCount() == 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;

  int32_t acc[kPoolingAccTrancheSize];
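  // Signed 8-bit inputs are accumulated in 32 bits: this single implementation
  // handles any window size, and |sum| can reach 128 * filter_count, which
  // exceeds the int16_t range once the window has 256 taps or more.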
  for (int batch = 0; batch < batches; ++batch)
  {
    // We proceed through the depth in tranches (see comment above). The
    // depth_base is the depth at the beginning of the tranche. The
    // tranche_depth is the depth dimension of the tranche.
    for (int depth_base = 0; depth_base < depth; depth_base += kPoolingAccTrancheSize)
    {
      const int tranche_depth = std::min(depth - depth_base, kPoolingAccTrancheSize);
      for (int out_y = 0; out_y < output_height; ++out_y)
      {
        for (int out_x = 0; out_x < output_width; ++out_x)
        {
          const int in_x_origin = (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin = (out_y * stride_height) - params.padding_values.height;
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin);
          const int filter_count =
            (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
          memset(acc, 0, tranche_depth * sizeof(acc[0]));
          const int8_t *input_ptr =
            input_data + depth_base +
            depth * (in_x_origin + input_width * (in_y_origin + input_height * batch));
          for (int fy = filter_y_start; fy < filter_y_end; fy++)
          {
            const int8_t *input_row_ptr = input_ptr + depth * (fy * input_width + filter_x_start);
            for (int fx = filter_x_start; fx < filter_x_end; fx++)
            {
              const int8_t *input_channel_ptr = input_row_ptr;
              int channel = 0;
#ifdef USE_NEON
              for (; channel <= tranche_depth - 16; channel += 16)
              {
                int16x4_t acc_reg[4];
                int8x16_t input_reg = vld1q_s8(input_channel_ptr);
                input_channel_ptr += 16;
                acc_reg[0] = vget_low_s16(vmovl_s8(vget_low_s8(input_reg)));
                acc_reg[1] = vget_high_s16(vmovl_s8(vget_low_s8(input_reg)));
                acc_reg[2] = vget_low_s16(vmovl_s8(vget_high_s8(input_reg)));
                acc_reg[3] = vget_high_s16(vmovl_s8(vget_high_s8(input_reg)));
                for (int i = 0; i < 4; i++)
                {
                  vst1q_s32(acc + channel + 4 * i,
                            vaddw_s16(vld1q_s32(acc + channel + 4 * i), acc_reg[i]));
                }
              }
              for (; channel <= tranche_depth - 8; channel += 8)
              {
                int16x4_t acc_reg[2];
                int16x8_t input_reg = vmovl_s8(vld1_s8(input_channel_ptr));
                input_channel_ptr += 8;
                acc_reg[0] = vget_low_s16(input_reg);
                acc_reg[1] = vget_high_s16(input_reg);
                for (int i = 0; i < 2; i++)
                {
                  vst1q_s32(acc + channel + 4 * i,
                            vaddw_s16(vld1q_s32(acc + channel + 4 * i), acc_reg[i]));
                }
              }
#endif
              for (; channel < tranche_depth; ++channel)
              {
                acc[channel] += *input_channel_ptr++;
              }
              input_row_ptr += depth;
            }
          }
          int8_t *output_ptr = output_data + Offset(output_shape, batch, out_y, out_x, depth_base);
          int channel = 0;
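          // The sum is rounded to the nearest integer, away from zero at the
          // halfway point: e.g. acc = -7 with filter_count = 4 (true mean -1.75)
          // yields (-7 - 2) / 4 = -2, whereas (acc + filter_count / 2) / filter_count
          // would truncate to -1 for negative sums.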
#ifdef USE_NEON
          for (; channel <= tranche_depth - 8; channel += 8)
          {
            int16_t buf[8];
            for (int i = 0; i < 8; i++)
            {
              buf[i] = acc[channel + i] > 0 ? (acc[channel + i] + filter_count / 2) / filter_count
                                            : (acc[channel + i] - filter_count / 2) / filter_count;
            }
            int8x8_t buf8 = vqmovn_s16(vld1q_s16(buf));
            buf8 = vmin_s8(buf8, vdup_n_s8(params.quantized_activation_max));
            buf8 = vmax_s8(buf8, vdup_n_s8(params.quantized_activation_min));
            vst1_s8(output_ptr + channel, buf8);
          }
#endif
          for (; channel < tranche_depth; ++channel)
          {
            int16_t a = acc[channel] > 0 ? (acc[channel] + filter_count / 2) / filter_count
                                         : (acc[channel] - filter_count / 2) / filter_count;
            a = std::max<int16_t>(a, params.quantized_activation_min);
            a = std::min<int16_t>(a, params.quantized_activation_max);
            output_ptr[channel] = static_cast<int8_t>(a);
          }
        }
      }
    }
  }
}

} // namespace cker
} // namespace nnfw

#endif // __NNFW_CKER_AVERAGE_POOL_H__