ONE - On-device Neural Engine
PALMul.h
/*
 * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ONERT_MICRO_EXECUTE_PAL_MUL_H
#define ONERT_MICRO_EXECUTE_PAL_MUL_H

#include "PALMulCommon.h"

namespace onert_micro
{
namespace execute
{
namespace pal
{

template <typename InputType, typename OutputType>
OMStatus Mul(const core::ArithmeticQuantParams &params, uint32_t size, const InputType *input1_data,
             const InputType *input2_data, OutputType *output_data)
{
  for (int i = 0; i < size; ++i)
  {
    const int32_t input1_val = params.input1_offset + input1_data[i];
    const int32_t input2_val = params.input2_offset + input2_data[i];
    const int32_t unclamped_result =
      params.output_offset + multiplyByQuantizedMultiplier(input1_val * input2_val,
                                                           params.output_multiplier,
                                                           params.output_shift);
    const int32_t clamped_output = std::min(
      params.quantized_activation_max, std::max(params.quantized_activation_min, unclamped_result));
    output_data[i] = static_cast<OutputType>(clamped_output);
  }
  return Ok;
}

} // namespace pal
} // namespace execute
} // namespace onert_micro

#endif // ONERT_MICRO_EXECUTE_PAL_MUL_H
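
To make the quantized arithmetic concrete, the following is a minimal usage sketch of the template above on int8 data. The ArithmeticQuantParams field names match the definitions cross-referenced below; the numeric quantization values (offsets, multiplier, shift) are illustrative assumptions rather than parameters taken from a real onert-micro model, and the sketch assumes ArithmeticQuantParams is a plain aggregate that can be value-initialized.

// Hypothetical usage of onert_micro::execute::pal::Mul on int8 tensors.
// All quantization constants below are made-up example values.
#include <cstdint>

#include "PALMul.h"

using namespace onert_micro;

int main()
{
  const int8_t input1[4] = {10, -5, 127, -128};
  const int8_t input2[4] = {3, 3, 3, 3};
  int8_t output[4] = {};

  core::ArithmeticQuantParams params{}; // assumed to be a plain aggregate struct
  params.input1_offset = 0;              // negated zero point of input1
  params.input2_offset = 0;              // negated zero point of input2
  params.output_offset = 0;              // zero point of the output
  params.output_multiplier = 1073741824; // Q31 fixed-point multiplier (example: 0.5)
  params.output_shift = -6;              // additional right shift (example)
  params.quantized_activation_min = -128;
  params.quantized_activation_max = 127;

  // InputType/OutputType are deduced as int8_t from the pointer arguments.
  OMStatus status = execute::pal::Mul(params, 4u, input1, input2, output);
  return status == Ok ? 0 : 1;
}

With the example multiplier of 2^30 (0.5 in Q31) and a shift of -6, each int32 product is scaled by roughly 0.5 / 64 before being clamped to the int8 activation range.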
Referenced symbols (Doxygen cross-references):
onert_micro::execute::pal::Mul: defined at PALMul.h:33
onert_micro::execute::pal::multiplyByQuantizedMultiplier: int32_t multiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier, int shift), defined at PALUtils.h:104
onert_micro: defined at OMMemoryManager.h:26
onert_micro::OMStatus: defined at OMStatus.h:24
onert_micro::Ok: enumerator of OMStatus, defined at OMStatus.h:25
onert_micro::core::ArithmeticQuantParams: defined at OMKernelData.h:111; fields used here: input1_offset (int32_t, OMKernelData.h:112), input2_offset (int32_t, :113), output_multiplier (int32_t, :119), output_shift (int, :120), output_offset (int32_t, :121), quantized_activation_max (int32_t, :122), quantized_activation_min (int32_t, :123)
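
The numerical behaviour of the kernel hinges on the fixed-point rescale. The project's multiplyByQuantizedMultiplier is defined in PALUtils.h:104 with the signature listed above; the sketch below shows the conventional gemmlowp/TFLite-style implementation that functions with this signature typically use (a saturating rounding doubling high multiply by a Q31 multiplier, followed by a rounding arithmetic right shift). Treat it as an illustration of the technique, not the exact onert-micro body.

// Illustrative sketch of a TFLite/gemmlowp-style multiplyByQuantizedMultiplier.
// The actual onert-micro implementation lives in PALUtils.h and may differ.
#include <cstdint>
#include <limits>

// (a * b * 2) >> 31 with round-to-nearest; saturates the single overflow case
// INT32_MIN * INT32_MIN.
static int32_t saturatingRoundingDoublingHighMul(int32_t a, int32_t b)
{
  if (a == b && a == std::numeric_limits<int32_t>::min())
    return std::numeric_limits<int32_t>::max();
  const int64_t ab_64 = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  const int32_t nudge = ab_64 >= 0 ? (1 << 30) : (1 - (1 << 30));
  return static_cast<int32_t>((ab_64 + nudge) / (1ll << 31));
}

// Arithmetic right shift by `exponent` with round-to-nearest, ties away from zero.
static int32_t roundingDivideByPOT(int32_t x, int exponent)
{
  const int32_t mask = static_cast<int32_t>((1ll << exponent) - 1);
  const int32_t remainder = x & mask;
  const int32_t threshold = (mask >> 1) + ((x < 0) ? 1 : 0);
  return (x >> exponent) + ((remainder > threshold) ? 1 : 0);
}

// Rescale an int32 accumulator by a Q31 multiplier and a power-of-two shift.
// Same signature as the function cross-referenced above; the body is a sketch.
static int32_t multiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier, int shift)
{
  const int left_shift = shift > 0 ? shift : 0;
  const int right_shift = shift > 0 ? 0 : -shift;
  return roundingDivideByPOT(
    saturatingRoundingDoublingHighMul(x * (1 << left_shift), quantized_multiplier), right_shift);
}

In the Mul kernel above, this rescale maps the raw int32 product of the two offset-adjusted inputs back into the output tensor's quantized scale before the activation clamp.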
File location: onert-micro/onert-micro/include/pal/mcu/PALMul.h