ONE - On-device Neural Engine
helpers_asymm.h
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_HELPERS_ASYMM_H
#define ARM_COMPUTE_HELPERS_ASYMM_H

#include "helpers.h"

/* Convert to the given type with round-to-nearest-even. */
#define CONVERT_DOWN_RTE_STR(x, type) (convert_##type##_rte((x)))
#define CONVERT_DOWN_RTE(x, type) CONVERT_DOWN_RTE_STR(x, type)

/* Quantize a float value to QASYMM8 (uint8) using the given offset (zero point) and scale. */
inline uchar quantize_qasymm8(float input, float offset, float scale)
{
  float out_f32 = input / scale + offset;
  uchar res_u8 = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, int), uchar);
  return res_u8;
}

/* Dequantize a QASYMM8 (uint8) value back to float. */
inline float dequantize_qasymm8(uchar input, float offset, float scale)
{
  return ((float)input - offset) * scale;
}

/* Dequantize a QASYMM8_SIGNED (int8) value back to float. */
inline float dequantize_qasymm8_signed(char input, float offset, float scale)
{
  return ((float)input - offset) * scale;
}

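/* Illustrative round trip (values chosen for exposition only, not taken from any kernel):
 * with scale = 0.5f and offset = 128.0f,
 *   quantize_qasymm8(3.2f, 128.0f, 0.5f)  -> 3.2 / 0.5 + 128 = 134.4 -> rounds (RTE) to 134
 *   dequantize_qasymm8(134, 128.0f, 0.5f) -> (134 - 128) * 0.5 = 3.0f
 * The 0.2 lost in the round trip is the usual quantization error; out-of-range inputs saturate
 * to [0, 255] through CONVERT_SAT.
 */
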
#define QUANTIZE_IMPL(type, size) \
  inline VEC_DATA_TYPE(type, size) \
  quantize_##type##size(VEC_DATA_TYPE(float, size) input, float offset, float scale) \
  { \
    VEC_DATA_TYPE(float, size) \
    out_f32 = input / (VEC_DATA_TYPE(float, size))(scale) + (VEC_DATA_TYPE(float, size))(offset); \
    VEC_DATA_TYPE(type, size) \
    res = \
      CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, VEC_DATA_TYPE(int, size)), VEC_DATA_TYPE(type, size)); \
    return res; \
  }

#define DEQUANTIZE_IMPL(type, size) \
  inline VEC_DATA_TYPE(float, size) \
  dequantize_##type##size(VEC_DATA_TYPE(type, size) input, float offset, float scale) \
  { \
    return (CONVERT(input, VEC_DATA_TYPE(float, size)) - offset) * scale; \
  }

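/* Each QUANTIZE_IMPL(type, size) instantiation below emits a function named quantize_<type><size>;
 * e.g. QUANTIZE_IMPL(uchar, 4) defines
 *   uchar4 quantize_uchar4(float4 input, float offset, float scale);
 * A minimal usage sketch (hypothetical kernel, offset and scale values assumed for illustration):
 *
 *   __kernel void quantize_example(__global const float4 *src, __global uchar4 *dst)
 *   {
 *     const int i = get_global_id(0);
 *     dst[i] = quantize_uchar4(src[i], 128.0f, 0.5f); // or QUANTIZE(src[i], 128.0f, 0.5f, uchar, 4)
 *   }
 *
 * dequantize_<type><size> performs the inverse mapping back to float<size>.
 */
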
#define ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) asymm_rounding_divide_by_POW2_##size( \
    VEC_DATA_TYPE(int, size) x, VEC_DATA_TYPE(int, size) exponent) \
  { \
    const VEC_DATA_TYPE(int, size) zero = (VEC_DATA_TYPE(int, size))0; \
    const VEC_DATA_TYPE(int, size) one = (VEC_DATA_TYPE(int, size))1; \
    VEC_DATA_TYPE(int, size) \
    mask = (one << exponent) - one; \
    VEC_DATA_TYPE(int, size) \
    threshold = (mask >> 1) + select(zero, one, x < 0); \
    return (x >> exponent) + select(zero, one, (x & mask) > threshold); \
  }

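/* asymm_rounding_divide_by_POW2_<size> divides by 2^exponent and rounds to nearest, with ties
 * rounded away from zero. Worked example (single lane, illustrative values):
 *   x = 5,  exponent = 1: 5 >> 1 = 2, remainder above threshold     -> result 3   (2.5 -> 3)
 *   x = -5, exponent = 1: -5 >> 1 = -3, remainder not above threshold -> result -3 (-2.5 -> -3)
 */
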
#define ASYMM_MULT_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) \
  asymm_mult##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \
  { \
    VEC_DATA_TYPE(int, size) \
    overflow = a == b && a == INT_MIN; \
    VEC_DATA_TYPE(long, size) \
    a_64 = convert_long##size(a); \
    VEC_DATA_TYPE(long, size) \
    b_64 = convert_long##size(b); \
    VEC_DATA_TYPE(long, size) \
    ab_64 = a_64 * b_64; \
    /* Revert COMPMID-907 */ \
    VEC_DATA_TYPE(long, size) \
    mask1 = 1 << 30; \
    VEC_DATA_TYPE(long, size) \
    mask2 = 1 - (1 << 30); \
    VEC_DATA_TYPE(long, size) \
    is_positive_or_zero = ab_64 >= 0; \
    VEC_DATA_TYPE(long, size) \
    nudge = select(mask2, mask1, is_positive_or_zero); \
    VEC_DATA_TYPE(long, size) \
    mask = 1ll << 31; \
    VEC_DATA_TYPE(int, size) \
    ab_x2_high32 = convert_int##size((ab_64 + nudge) / mask); \
    return select(ab_x2_high32, INT_MAX, overflow); \
  }

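/* asymm_mult<size> is a saturating, rounding, doubling high multiply: for Q0.31 fixed-point
 * operands it returns round(a * b / 2^31), i.e. the high 32 bits of 2 * a * b, saturating the
 * single overflowing case INT_MIN * INT_MIN to INT_MAX. Worked example (illustrative):
 *   a = b = 1 << 30 (0.5 in Q0.31)
 *   a * b = 2^60, nudge = 2^30, (2^60 + 2^30) / 2^31 = 2^29, which is 0.25 in Q0.31.
 */
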
#define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) \
  asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(VEC_DATA_TYPE(int, size) a) \
  { \
    const VEC_DATA_TYPE(int, size) constant_term = 1895147668; \
    const VEC_DATA_TYPE(int, size) constant_1_over_3 = 715827883; \
    const int k_fractional_bits = 31; \
    VEC_DATA_TYPE(int, size) \
    x = a + (1 << (k_fractional_bits - 3)); \
    VEC_DATA_TYPE(int, size) \
    x2 = ASYMM_MULT(x, x, size); \
    VEC_DATA_TYPE(int, size) \
    x3 = ASYMM_MULT(x2, x, size); \
    VEC_DATA_TYPE(int, size) \
    x4 = ASYMM_MULT(x2, x2, size); \
    VEC_DATA_TYPE(int, size) \
    x4_over_4 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4, 2, size); \
    VEC_DATA_TYPE(int, size) \
    x4_over_24_plus_x3_over_6_plus_x2 = \
      ASYMM_MULT((x4_over_4 + x3), constant_1_over_3, size) + x2; \
    VEC_DATA_TYPE(int, size) \
    x4_over_24_plus_x3_over_6_plus_x2_over_2 = \
      ASYMM_ROUNDING_DIVIDE_BY_POW2(x4_over_24_plus_x3_over_6_plus_x2, 1, size); \
    return constant_term + \
           ASYMM_MULT(constant_term, x + x4_over_24_plus_x3_over_6_plus_x2_over_2, size); \
  }

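/* For a in [-1/4, 0) in Q0.31, this evaluates exp(a) as
 *   exp(a) = exp(-1/8) * exp(t),  t = a + 1/8 in [-1/8, 1/8),
 * with exp(t) approximated by the 4th-order Taylor polynomial 1 + t + t^2/2 + t^3/6 + t^4/24.
 * The magic constants are Q0.31 encodings: 1895147668 ~ exp(-1/8) * 2^31 and
 * 715827883 ~ (1/3) * 2^31; the shift by (k_fractional_bits - 3) adds the 1/8 offset.
 */
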
#define ASYMM_SELECT_USING_MASK_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) asymm_select_using_mask##size(VEC_DATA_TYPE(int, size) if_mask, \
                                                                VEC_DATA_TYPE(int, size) then_val, \
                                                                VEC_DATA_TYPE(int, size) else_val) \
  { \
    return (if_mask & then_val) ^ (~if_mask & else_val); \
  }

/* Per-lane mask that is all ones where a == 0 and all zeros elsewhere. */
#define ASYMM_MASK_IF_ZERO_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) asymm_mask_if_zero##size(VEC_DATA_TYPE(int, size) a) \
  { \
    const VEC_DATA_TYPE(int, size) all_zeros = 0; \
    const VEC_DATA_TYPE(int, size) all_ones = ~0; \
    return select(all_zeros, all_ones, a == 0); \
  }

/* Per-lane mask that is all ones where a != 0 and all zeros elsewhere. */
#define ASYMM_MASK_IF_NON_ZERO_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) asymm_mask_if_non_zero##size(VEC_DATA_TYPE(int, size) a) \
  { \
    const VEC_DATA_TYPE(int, size) all_zeros = 0; \
    const VEC_DATA_TYPE(int, size) all_ones = ~0; \
    return select(all_zeros, all_ones, a != 0); \
  }

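/* ASYMM_SELECT_USING_MASK expects if_mask to be all ones or all zeros per lane, which is exactly
 * what ASYMM_MASK_IF_ZERO / ASYMM_MASK_IF_NON_ZERO produce; the bitwise blend
 * (if_mask & then_val) ^ (~if_mask & else_val) then picks then_val or else_val per lane.
 * Illustrative lane values: if_mask = 0xFFFFFFFF selects then_val, if_mask = 0 selects else_val.
 */
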
#define EXP_BARREL_SHIFTER_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) exp_barrel_shifter##size( \
    VEC_DATA_TYPE(int, size) result, int exponent, int fp_multiplier, int k_integer_bits, \
    int k_fractional_bits, VEC_DATA_TYPE(int, size) remainder) \
  { \
    if (k_integer_bits > exponent) \
    { \
      const int k_shift_amount = k_integer_bits > exponent ? k_fractional_bits + exponent : 0; \
      return ASYMM_SELECT_USING_MASK( \
        ASYMM_MASK_IF_NON_ZERO(remainder & (1 << k_shift_amount), size), \
        ASYMM_MULT(result, fp_multiplier, size), result, size); \
    } \
    \
    return result; \
  }

#define ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) \
  asymm_exp_on_negative_values##size(VEC_DATA_TYPE(int, size) a, int k_integer_bits) \
  { \
    const int k_fractional_bits = 31 - k_integer_bits; \
    VEC_DATA_TYPE(int, size) \
    k_one_quarter = 1 << (k_fractional_bits - 2); \
    VEC_DATA_TYPE(int, size) \
    mask = k_one_quarter - 1; \
    VEC_DATA_TYPE(int, size) \
    a_mod_quarter_minus_one_quarter = (a & mask) - k_one_quarter; \
    VEC_DATA_TYPE(int, size) \
    a_mod_quarter_minus_one_quarter_scaled = a_mod_quarter_minus_one_quarter << k_integer_bits; \
    VEC_DATA_TYPE(int, size) \
    result = ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL( \
      a_mod_quarter_minus_one_quarter_scaled, size); \
    VEC_DATA_TYPE(int, size) \
    remainder = a_mod_quarter_minus_one_quarter - a; \
    \
    result = EXP_BARREL_SHIFTER(result, -2, 1672461947, k_integer_bits, k_fractional_bits, \
                                remainder, size); \
    result = EXP_BARREL_SHIFTER(result, -1, 1302514674, k_integer_bits, k_fractional_bits, \
                                remainder, size); \
    result = EXP_BARREL_SHIFTER(result, +0, 790015084, k_integer_bits, k_fractional_bits, \
                                remainder, size); \
    result = EXP_BARREL_SHIFTER(result, +1, 290630308, k_integer_bits, k_fractional_bits, \
                                remainder, size); \
    result = EXP_BARREL_SHIFTER(result, +2, 39332535, k_integer_bits, k_fractional_bits, \
                                remainder, size); \
    result = \
      EXP_BARREL_SHIFTER(result, +3, 720401, k_integer_bits, k_fractional_bits, remainder, size); \
    result = \
      EXP_BARREL_SHIFTER(result, +4, 242, k_integer_bits, k_fractional_bits, remainder, size); \
    \
    if (k_integer_bits > 5) \
    { \
      const VEC_DATA_TYPE(int, size) clamp = -(1 << (k_fractional_bits + 5)); \
      result = ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_NON_ZERO(a < clamp, size), 0, result, size); \
    } \
    \
    const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX; \
    return ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_ZERO(a, size), Q0_one, result, size); \
  }

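/* asymm_exp_on_negative_values computes exp(a) for a <= 0 in Q(k_integer_bits) fixed point.
 * A reduced argument in [-1/4, 0) is handled by the Taylor-series helper above, and each set bit
 * of the remaining part selects one multiplication by a tabulated constant via EXP_BARREL_SHIFTER.
 * The fp_multiplier values are Q0.31 encodings of exp(-2^exponent):
 *   1672461947 ~ exp(-1/4), 1302514674 ~ exp(-1/2), 790015084 ~ exp(-1),
 *   290630308 ~ exp(-2), 39332535 ~ exp(-4), 720401 ~ exp(-8), 242 ~ exp(-16).
 * Very negative inputs are clamped to 0, and a == 0 returns Q0_one (1.0 in Q0.31).
 */
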
#define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) \
  asymm_saturating_rounding_mult_by_pow2##size(VEC_DATA_TYPE(int, size) x, int exponent) \
  { \
    if (exponent < 0) \
    { \
      return ASYMM_ROUNDING_DIVIDE_BY_POW2(x, -exponent, size); \
    } \
    \
    const VEC_DATA_TYPE(int, size) min = INT_MIN; \
    const VEC_DATA_TYPE(int, size) max = INT_MAX; \
    int threshold = ((1 << (31 - exponent)) - 1); \
    VEC_DATA_TYPE(int, size) \
    positive_mask = ASYMM_MASK_IF_NON_ZERO(x > threshold, size); \
    VEC_DATA_TYPE(int, size) \
    negative_mask = ASYMM_MASK_IF_NON_ZERO(x < -threshold, size); \
    VEC_DATA_TYPE(int, size) \
    result = x << exponent; \
    result = ASYMM_SELECT_USING_MASK(positive_mask, max, result, size); \
    result = ASYMM_SELECT_USING_MASK(negative_mask, min, result, size); \
    return result; \
  }

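/* Multiplies by 2^exponent with saturation; negative exponents fall back to the rounding divide.
 * Worked example (illustrative): x = 1 << 29 (0.25 in Q0.31) with exponent = 2 would give 1.0,
 * which is not representable in Q0.31; x exceeds threshold = 2^29 - 1, so the result saturates
 * to INT_MAX instead of wrapping around.
 */
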
#define ASYMM_ROUNDING_HALF_SUM_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) \
  asymm_rounding_half_sum##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \
  { \
    VEC_DATA_TYPE(long, size) \
    a64 = convert_long##size(a); \
    VEC_DATA_TYPE(long, size) \
    b64 = convert_long##size(b); \
    VEC_DATA_TYPE(long, size) \
    sum = a64 + b64; \
    const VEC_DATA_TYPE(long, size) one = 1; \
    const VEC_DATA_TYPE(long, size) minus_one = -1; \
    VEC_DATA_TYPE(long, size) \
    sign = select(minus_one, one, sum >= 0); \
    return convert_int##size((sum + sign) / 2); \
  }

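/* Rounded average (a + b) / 2, computed in 64 bits so the intermediate sum cannot overflow, with
 * ties rounded away from zero. Illustrative lane values: a = 3, b = 4 -> (7 + 1) / 2 = 4;
 * a = -3, b = -4 -> (-7 - 1) / 2 = -4.
 */
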
#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) \
  asymm_one_over_one_plus_x_for_x_in_0_1##size(VEC_DATA_TYPE(int, size) a) \
  { \
    const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX; \
    const VEC_DATA_TYPE(int, size) Q2_one = 1 << (31 - 2); \
    VEC_DATA_TYPE(int, size) \
    half_denominator = ASYMM_ROUNDING_HALF_SUM(a, Q0_one, size); \
    const VEC_DATA_TYPE(int, size) Q2_48_over_17 = 1515870810; \
    const VEC_DATA_TYPE(int, size) Q2_neg_32_over_17 = -1010580540; \
    VEC_DATA_TYPE(int, size) \
    x = Q2_48_over_17 + ASYMM_MULT(half_denominator, Q2_neg_32_over_17, size); \
    for (int i = 0; i < 3; i++) \
    { \
      VEC_DATA_TYPE(int, size) \
      half_denominator_times_x = ASYMM_MULT(half_denominator, x, size); \
      VEC_DATA_TYPE(int, size) \
      one_minus_half_denominator_times_x = Q2_one - half_denominator_times_x; \
      VEC_DATA_TYPE(int, size) \
      tmp = ASYMM_MULT(x, one_minus_half_denominator_times_x, size); \
      x = x + ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(tmp, 2, size); \
    } \
    return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, 1, size); \
  }

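/* Computes 1 / (1 + a) for a in [0, 1) given in Q0.31, using Newton-Raphson division:
 * d = (1 + a) / 2 lies in [1/2, 1); the initial estimate x ~ 48/17 - 32/17 * d (the Q2.29
 * constants 1515870810 and -1010580540) is refined three times with x <- x + x * (1 - d * x),
 * and the final doubling converts the Q2.29 estimate of 1/d into 1/(1 + a) in Q0.31.
 */
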
/* Change the number of integer bits of a fixed-point value, saturating on overflow. */
#define ASYMM_RESCALE_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) asymm_rescale##size(VEC_DATA_TYPE(int, size) value, \
                                                      int src_integer_bits, int dst_integer_bits) \
  { \
    int exponent = src_integer_bits - dst_integer_bits; \
    return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(value, exponent, size); \
  }

#define QUANTIZE_STR(input, offset, scale, type, size) quantize_##type##size(input, offset, scale)
#define QUANTIZE(input, offset, scale, type, size) QUANTIZE_STR(input, offset, scale, type, size)
#define DEQUANTIZE_STR(input, offset, scale, type, size) \
  dequantize_##type##size(input, offset, scale)
#define DEQUANTIZE(input, offset, scale, type, size) \
  DEQUANTIZE_STR(input, offset, scale, type, size)

#define ASYMM_ROUNDING_DIVIDE_BY_POW2(x, exponent, size) \
  asymm_rounding_divide_by_POW2_##size(x, exponent)
#define ASYMM_MULT(a, b, size) asymm_mult##size(a, b)
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(x, quantized_multiplier, left_shift, size) \
  ASYMM_MULT(x *((VEC_DATA_TYPE(int, size))(1) << (-left_shift)), quantized_multiplier, size)
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, quantized_multiplier, right_shift, size) \
  ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(x, quantized_multiplier, size), right_shift, size)
#define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a, size) \
  asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(a)
#define ASYMM_SELECT_USING_MASK(if_mask, then_val, else_val, size) \
  asymm_select_using_mask##size(if_mask, then_val, else_val)
#define ASYMM_MASK_IF_ZERO(a, size) asymm_mask_if_zero##size(a)
#define ASYMM_MASK_IF_NON_ZERO(a, size) asymm_mask_if_non_zero##size(a)
#define EXP_BARREL_SHIFTER(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, \
                           remainder, size) \
  exp_barrel_shifter##size(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, \
                           remainder)
#define ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, size) \
  asymm_exp_on_negative_values##size(a, k_integer_bits)
#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(a, size) \
  asymm_one_over_one_plus_x_for_x_in_0_1##size(a)
#define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, exponent, size) \
  asymm_saturating_rounding_mult_by_pow2##size(x, exponent)
#define ASYMM_ROUNDING_HALF_SUM(a, b, size) asymm_rounding_half_sum##size(a, b)
#define ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, size) \
  asymm_rescale##size(value, src_integer_bits, dst_integer_bits)

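/* The *_IMPL macros define one function per vector width, and the wrappers above paste the width
 * onto the function name. QUANTIZE/DEQUANTIZE go through an extra _STR level so their type and
 * size arguments may themselves be macros. A minimal sketch (variable names and values are
 * illustrative assumptions):
 *
 *   int4 acc = ...;                                        // fixed-point accumulator
 *   acc = ASYMM_MULT(acc, multiplier, 4);                  // expands to asymm_mult4(acc, multiplier)
 *   acc = ASYMM_ROUNDING_DIVIDE_BY_POW2(acc, shift, 4);    // asymm_rounding_divide_by_POW2_4(acc, shift)
 *   uchar4 q = QUANTIZE(val_f32, 128.0f, 0.5f, uchar, 4);  // quantize_uchar4(val_f32, 128.0f, 0.5f)
 *
 * Each expansion is valid only if the matching instantiation (e.g. QUANTIZE_IMPL(uchar, 4))
 * exists in this file.
 */
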
#define MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(size) \
  inline VEC_DATA_TYPE(int, size) \
  multiply_by_quantized_multiplier##size(VEC_DATA_TYPE(int, size) input, int qmul, int shift) \
  { \
    const int left_shift = shift > 0 ? shift : 0; \
    const int right_shift = shift > 0 ? 0 : -shift; \
    return ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(input * (1 << left_shift), qmul, size), \
                                         right_shift, size); \
  }
#define MULTIPLY_BY_QUANTIZED_MULTIPLIER(input, qmul, shift, size) \
  multiply_by_quantized_multiplier##size(input, qmul, shift)

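/* multiply_by_quantized_multiplier<size> implements the usual integer requantization step:
 * a real-valued scale is pre-factored (offline) into a Q0.31 multiplier qmul and a power-of-two
 * shift, and the result approximates input * qmul * 2^(shift - 31). Sketch of the intended use
 * (the pre-factored numbers here are assumptions worked out for illustration only):
 *   scale 0.0015 ~ 1649267442 * 2^(-40)  ->  qmul = 1649267442, shift = -9
 *   MULTIPLY_BY_QUANTIZED_MULTIPLIER(acc, 1649267442, -9, 4) rescales an int4 accumulator by ~0.0015.
 */
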
QUANTIZE_IMPL(uchar, 1)
QUANTIZE_IMPL(char, 1)
QUANTIZE_IMPL(uint, 1)
QUANTIZE_IMPL(int, 1)
QUANTIZE_IMPL(uchar, 4)
QUANTIZE_IMPL(ushort, 4)
QUANTIZE_IMPL(short, 4)
QUANTIZE_IMPL(uchar, 16)
QUANTIZE_IMPL(char, 16)
QUANTIZE_IMPL(ushort, 16)
QUANTIZE_IMPL(short, 16)
QUANTIZE_IMPL(uint, 16)
QUANTIZE_IMPL(int, 16)

DEQUANTIZE_IMPL(uchar, 1)
DEQUANTIZE_IMPL(char, 1)
DEQUANTIZE_IMPL(uint, 1)
DEQUANTIZE_IMPL(int, 1)
DEQUANTIZE_IMPL(uchar, 4)
DEQUANTIZE_IMPL(ushort, 4)
DEQUANTIZE_IMPL(short, 4)
DEQUANTIZE_IMPL(uchar, 16)
DEQUANTIZE_IMPL(char, 16)
DEQUANTIZE_IMPL(ushort, 16)
DEQUANTIZE_IMPL(short, 16)
DEQUANTIZE_IMPL(uint, 16)
DEQUANTIZE_IMPL(int, 16)

#endif // ARM_COMPUTE_HELPERS_ASYMM_H