// NOTE(review): extracted fragment of a float average-pooling kernel — the
// enclosing function signature and many interleaved lines (braces, h_start /
// w_start / hpad / wpad / stride declarations) are outside this view, so the
// comments below describe only the visible logic.
50 const int input_height = input_shape.
Dims(1);
51 const int input_width = input_shape.
Dims(2);
// One float counter per output column: how many input cells contributed to
// each output cell, used for the averaging divide at the end.
61 Eigen::VectorXf out_count(out_mat.cols());
// Scatter-accumulate: walk every input cell and add its column into every
// output cell whose pooling window covers it (the inverse of the usual
// gather-per-output formulation).
65 for (
int b = 0; b < batches; ++b)
67 for (
int h = 0; h < input_height; ++h)
69 for (
int w = 0; w < input_width; ++w)
// Range of output rows/cols whose windows include (h, w); presumably
// h_start/w_start are computed in the lines elided here — TODO confirm.
77 int h_end = std::min(hpad / stride_height + 1, output_height);
80 int w_end = std::min(wpad / stride_width + 1, output_width);
82 for (
int ph = h_start; ph < h_end; ++ph)
84 for (
int pw = w_start; pw < w_end; ++pw)
86 int out_offset =
NodeOffset(b, ph, pw, output_height, output_width);
87 out_mat.col(out_offset) += in_mat.col(
NodeOffset(b, h, w, input_height, input_width));
88 out_count(out_offset)++;
// Every output cell must have received at least one contribution, otherwise
// the divide below would be by zero.
95 assert(out_count.minCoeff() > 0);
// Turn the accumulated sums into averages, dividing each column by its count.
96 out_mat.array().rowwise() /= out_count.transpose().array();
// Tail loop over the flattened output (body elided from this view) —
// presumably applies the activation min/max clamp; TODO confirm.
99 for (
int i = 0; i < flat_size; ++i)
// NOTE(review): extracted fragment of a uint8 average-pooling kernel (16-bit
// accumulators). The function signature above this line and many interleaved
// lines (braces, in_x_origin/in_y_origin, `channel` and `buf` declarations)
// are outside this view; comments describe only the visible logic.
108 uint8_t *output_data)
// Channels are processed in tranches of up to 256 so the accumulator array
// stays small and cache-resident.
115 static constexpr int kPoolingAccTrancheSize = 256;
122 const int input_height = input_shape.
Dims(1);
123 const int input_width = input_shape.
Dims(2);
// 16-bit sums: assumes filter_count * 255 fits in uint16 for the filter
// sizes this variant is dispatched for — TODO confirm against the caller.
129 uint16_t acc[kPoolingAccTrancheSize];
130 for (
int batch = 0; batch < batches; ++batch)
135 for (
int depth_base = 0; depth_base < depth; depth_base += kPoolingAccTrancheSize)
137 const int tranche_depth = std::min(depth - depth_base, kPoolingAccTrancheSize);
138 for (
int out_y = 0; out_y < output_height; ++out_y)
140 for (
int out_x = 0; out_x < output_width; ++out_x)
// Clip the filter window to the valid input region (handles padding at the
// borders); filter_count is the number of in-bounds taps actually summed.
144 const int filter_x_start = std::max(0, -in_x_origin);
145 const int filter_x_end = std::min(params.
filter_width, input_width - in_x_origin);
146 const int filter_y_start = std::max(0, -in_y_origin);
147 const int filter_y_end = std::min(params.
filter_height, input_height - in_y_origin);
148 const int filter_count =
149 (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
150 memset(acc, 0, tranche_depth *
sizeof(acc[0]));
151 const uint8_t *input_ptr =
152 input_data + depth_base +
153 depth * (in_x_origin + input_width * (in_y_origin + input_height * batch));
// Sum the window row by row into acc[0..tranche_depth).
154 for (
int fy = filter_y_start; fy < filter_y_end; fy++)
156 const uint8_t *input_row_ptr = input_ptr + depth * (fy * input_width + filter_x_start);
157 for (
int fx = filter_x_start; fx < filter_x_end; fx++)
159 const uint8_t *input_channel_ptr = input_row_ptr;
// NEON fast path: widen 16 uint8 inputs at a time into the uint16
// accumulators via vaddw_u8.
162 for (; channel <= tranche_depth - 16; channel += 16)
164 uint16x8_t acc_reg[2];
165 for (
int i = 0; i < 2; i++)
167 acc_reg[i] = vld1q_u16(acc + channel + 8 * i);
169 uint8x16_t input_reg = vld1q_u8(input_channel_ptr);
170 input_channel_ptr += 16;
171 acc_reg[0] = vaddw_u8(acc_reg[0], vget_low_u8(input_reg));
172 acc_reg[1] = vaddw_u8(acc_reg[1], vget_high_u8(input_reg));
173 for (
int i = 0; i < 2; i++)
175 vst1q_u16(acc + channel + 8 * i, acc_reg[i]);
// 8-wide NEON remainder.
178 for (; channel <= tranche_depth - 8; channel += 8)
180 uint16x8_t acc_reg = vld1q_u16(acc + channel);
181 uint8x8_t input_reg = vld1_u8(input_channel_ptr);
182 input_channel_ptr += 8;
183 acc_reg = vaddw_u8(acc_reg, input_reg);
184 vst1q_u16(acc + channel, acc_reg);
// Scalar tail for the last (tranche_depth % 8) channels.
187 for (; channel < tranche_depth; ++channel)
189 acc[channel] += *input_channel_ptr++;
191 input_row_ptr += depth;
194 uint8_t *output_ptr = output_data +
Offset(
output_shape, batch, out_y, out_x, depth_base);
// Rounding average (adds filter_count/2 before dividing), specialized so the
// compiler can turn the divide by a known constant (9 = 3x3, 15) into
// multiply-shift; vqmovn saturates uint16 -> uint8, then clamp to the
// quantized activation range.
197#define AVGPOOL_DIVIDING_BY(FILTER_COUNT) \
198 if (filter_count == FILTER_COUNT) \
200 for (; channel <= tranche_depth - 8; channel += 8) \
203 for (int i = 0; i < 8; i++) \
205 buf[i] = (acc[channel + i] + FILTER_COUNT / 2) / FILTER_COUNT; \
207 uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf)); \
208 buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max)); \
209 buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min)); \
210 vst1_u8(output_ptr + channel, buf8); \
213 AVGPOOL_DIVIDING_BY(9)
214 AVGPOOL_DIVIDING_BY(15)
215#undef AVGPOOL_DIVIDING_BY
// Generic divide path for any other filter_count (activation clamp lines
// presumably sit in the elided lines 224-225 — TODO confirm).
216 for (; channel <= tranche_depth - 8; channel += 8)
219 for (
int i = 0; i < 8; i++)
221 buf[i] = (acc[channel + i] + filter_count / 2) / filter_count;
223 uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));
226 vst1_u8(output_ptr + channel, buf8);
// Scalar tail: same rounding average per remaining channel.
229 for (; channel < tranche_depth; ++channel)
231 uint8_t a = (acc[channel] + filter_count / 2) / filter_count;
234 output_ptr[channel] =
static_cast<uint8_t
>(a);
// NOTE(review): extracted fragment of a second uint8 average-pooling kernel.
// Structurally parallel to the 16-bit-accumulator variant above, but uses
// 32-bit accumulators, so it tolerates larger windows (uint16 sums would
// overflow once filter_count * 255 exceeds 65535). Signature and many
// interleaved lines are outside this view.
244 uint8_t *output_data)
// Channels processed in tranches of up to 256 to keep acc[] small.
252 static constexpr int kPoolingAccTrancheSize = 256;
259 const int input_height = input_shape.
Dims(1);
260 const int input_width = input_shape.
Dims(2);
// 32-bit per-channel sums for the current tranche.
266 uint32_t acc[kPoolingAccTrancheSize];
267 for (
int batch = 0; batch < batches; ++batch)
272 for (
int depth_base = 0; depth_base < depth; depth_base += kPoolingAccTrancheSize)
274 const int tranche_depth = std::min(depth - depth_base, kPoolingAccTrancheSize);
275 for (
int out_y = 0; out_y < output_height; ++out_y)
277 for (
int out_x = 0; out_x < output_width; ++out_x)
// Clip the window to the valid input region; filter_count is the number of
// in-bounds taps actually summed.
281 const int filter_x_start = std::max(0, -in_x_origin);
282 const int filter_x_end = std::min(params.
filter_width, input_width - in_x_origin);
283 const int filter_y_start = std::max(0, -in_y_origin);
284 const int filter_y_end = std::min(params.
filter_height, input_height - in_y_origin);
285 const int filter_count =
286 (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
287 memset(acc, 0, tranche_depth *
sizeof(acc[0]));
288 const uint8_t *input_ptr =
289 input_data + depth_base +
290 depth * (in_x_origin + input_width * (in_y_origin + input_height * batch));
291 for (
int fy = filter_y_start; fy < filter_y_end; fy++)
293 const uint8_t *input_row_ptr = input_ptr + depth * (fy * input_width + filter_x_start);
294 for (
int fx = filter_x_start; fx < filter_x_end; fx++)
296 const uint8_t *input_channel_ptr = input_row_ptr;
// NEON fast path: widen 16 uint8 inputs to four uint16x4 halves, then
// vaddw_u16 them into the uint32 accumulators.
299 for (; channel <= tranche_depth - 16; channel += 16)
301 uint16x4_t acc_reg[4];
302 uint8x16_t input_reg = vld1q_u8(input_channel_ptr);
303 input_channel_ptr += 16;
304 acc_reg[0] = vget_low_u16(vmovl_u8(vget_low_u8(input_reg)));
305 acc_reg[1] = vget_high_u16(vmovl_u8(vget_low_u8(input_reg)));
306 acc_reg[2] = vget_low_u16(vmovl_u8(vget_high_u8(input_reg)));
307 acc_reg[3] = vget_high_u16(vmovl_u8(vget_high_u8(input_reg)));
308 for (
int i = 0; i < 4; i++)
310 vst1q_u32(acc + channel + 4 * i,
311 vaddw_u16(vld1q_u32(acc + channel + 4 * i), acc_reg[i]));
// 8-wide NEON remainder.
314 for (; channel <= tranche_depth - 8; channel += 8)
316 uint16x4_t acc_reg[2];
317 uint16x8_t input_reg = vmovl_u8(vld1_u8(input_channel_ptr));
318 input_channel_ptr += 8;
319 acc_reg[0] = vget_low_u16(input_reg);
320 acc_reg[1] = vget_high_u16(input_reg);
321 for (
int i = 0; i < 2; i++)
323 vst1q_u32(acc + channel + 4 * i,
324 vaddw_u16(vld1q_u32(acc + channel + 4 * i), acc_reg[i]));
// Scalar tail for the last (tranche_depth % 8) channels.
328 for (; channel < tranche_depth; ++channel)
330 acc[channel] += *input_channel_ptr++;
332 input_row_ptr += depth;
335 uint8_t *output_ptr = output_data +
Offset(
output_shape, batch, out_y, out_x, depth_base);
// Rounding average specialized for common constant filter counts (9, 15) so
// the divide becomes a multiply-shift; vqmovn saturates to uint8, then the
// result is clamped to the quantized activation range.
338#define AVGPOOL_DIVIDING_BY(FILTER_COUNT) \
339 if (filter_count == FILTER_COUNT) \
341 for (; channel <= tranche_depth - 8; channel += 8) \
344 for (int i = 0; i < 8; i++) \
346 buf[i] = (acc[channel + i] + FILTER_COUNT / 2) / FILTER_COUNT; \
348 uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf)); \
349 buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max)); \
350 buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min)); \
351 vst1_u8(output_ptr + channel, buf8); \
354 AVGPOOL_DIVIDING_BY(9)
355 AVGPOOL_DIVIDING_BY(15)
356#undef AVGPOOL_DIVIDING_BY
// Generic divide path for any other filter_count (activation clamp lines
// presumably sit in the elided lines 365-366 — TODO confirm).
357 for (; channel <= tranche_depth - 8; channel += 8)
360 for (
int i = 0; i < 8; i++)
362 buf[i] = (acc[channel + i] + filter_count / 2) / filter_count;
364 uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));
367 vst1_u8(output_ptr + channel, buf8);
// Scalar tail: rounding average per remaining channel; a fits in uint16
// because acc <= 255 * filter_count.
370 for (; channel < tranche_depth; ++channel)
372 uint16_t a = (acc[channel] + filter_count / 2) / filter_count;
375 output_ptr[channel] =
static_cast<uint8_t
>(a);
// NOTE(review): extracted fragment of an int8 (signed-quantized) average-
// pooling kernel — same tranche structure as the uint8 variants above, with
// int32 accumulators and signed NEON intrinsics. The function signature and
// many interleaved lines are outside this view.
// Channels processed in tranches of up to 256 to keep acc[] small.
407 static constexpr int kPoolingAccTrancheSize = 256;
414 const int input_height = input_shape.
Dims(1);
415 const int input_width = input_shape.
Dims(2);
// Signed 32-bit per-channel sums for the current tranche.
421 int32_t acc[kPoolingAccTrancheSize];
422 for (
int batch = 0; batch < batches; ++batch)
427 for (
int depth_base = 0; depth_base < depth; depth_base += kPoolingAccTrancheSize)
429 const int tranche_depth = std::min(depth - depth_base, kPoolingAccTrancheSize);
430 for (
int out_y = 0; out_y < output_height; ++out_y)
432 for (
int out_x = 0; out_x < output_width; ++out_x)
// Clip the window to the valid input region; filter_count is the number of
// in-bounds taps actually summed.
436 const int filter_x_start = std::max(0, -in_x_origin);
437 const int filter_x_end = std::min(params.
filter_width, input_width - in_x_origin);
438 const int filter_y_start = std::max(0, -in_y_origin);
439 const int filter_y_end = std::min(params.
filter_height, input_height - in_y_origin);
440 const int filter_count =
441 (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
442 memset(acc, 0, tranche_depth *
sizeof(acc[0]));
443 const int8_t *input_ptr =
444 input_data + depth_base +
445 depth * (in_x_origin + input_width * (in_y_origin + input_height * batch));
446 for (
int fy = filter_y_start; fy < filter_y_end; fy++)
448 const int8_t *input_row_ptr = input_ptr + depth * (fy * input_width + filter_x_start);
449 for (
int fx = filter_x_start; fx < filter_x_end; fx++)
451 const int8_t *input_channel_ptr = input_row_ptr;
// NEON fast path: sign-extend 16 int8 inputs to four int16x4 halves, then
// vaddw_s16 them into the int32 accumulators.
454 for (; channel <= tranche_depth - 16; channel += 16)
456 int16x4_t acc_reg[4];
457 int8x16_t input_reg = vld1q_s8(input_channel_ptr);
458 input_channel_ptr += 16;
459 acc_reg[0] = vget_low_s16(vmovl_s8(vget_low_s8(input_reg)));
460 acc_reg[1] = vget_high_s16(vmovl_s8(vget_low_s8(input_reg)));
461 acc_reg[2] = vget_low_s16(vmovl_s8(vget_high_s8(input_reg)));
462 acc_reg[3] = vget_high_s16(vmovl_s8(vget_high_s8(input_reg)));
463 for (
int i = 0; i < 4; i++)
465 vst1q_s32(acc + channel + 4 * i,
466 vaddw_s16(vld1q_s32(acc + channel + 4 * i), acc_reg[i]));
// 8-wide NEON remainder.
469 for (; channel <= tranche_depth - 8; channel += 8)
471 int16x4_t acc_reg[2];
472 int16x8_t input_reg = vmovl_s8(vld1_s8(input_channel_ptr));
473 input_channel_ptr += 8;
474 acc_reg[0] = vget_low_s16(input_reg);
475 acc_reg[1] = vget_high_s16(input_reg);
476 for (
int i = 0; i < 2; i++)
478 vst1q_s32(acc + channel + 4 * i,
479 vaddw_s16(vld1q_s32(acc + channel + 4 * i), acc_reg[i]));
// Scalar tail for the last (tranche_depth % 8) channels.
483 for (; channel < tranche_depth; ++channel)
485 acc[channel] += *input_channel_ptr++;
487 input_row_ptr += depth;
490 int8_t *output_ptr = output_data +
Offset(
output_shape, batch, out_y, out_x, depth_base);
// Rounding divide, half-away-from-zero: the bias added before the truncating
// integer divide takes the sign of the accumulator. vqmovn saturates
// int16 -> int8; activation clamp lines presumably sit in the elided lines
// 502-503 — TODO confirm.
493 for (; channel <= tranche_depth - 8; channel += 8)
496 for (
int i = 0; i < 8; i++)
498 buf[i] = acc[channel + i] > 0 ? (acc[channel + i] + filter_count / 2) / filter_count
499 : (acc[channel + i] - filter_count / 2) / filter_count;
501 int8x8_t buf8 = vqmovn_s16(vld1q_s16(buf));
504 vst1_s8(output_ptr + channel, buf8);
// Scalar tail: same signed rounding average per remaining channel.
507 for (; channel < tranche_depth; ++channel)
509 int16_t a = acc[channel] > 0 ? (acc[channel] + filter_count / 2) / filter_count
510 : (acc[channel] - filter_count / 2) / filter_count;
513 output_ptr[channel] =
static_cast<int8_t
>(a);