ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Dequantize.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
18#include "OMStatus.h"
20#include "core/OMUtils.h"
21#include "PALDequantize.h"
22#include "core/OMRuntimeShape.h"
23
24#include "execute/OMUtils.h"
25
26using namespace onert_micro;
27using namespace onert_micro::execute;
28
namespace
{

// Tensor positions within the operator's input/output lists.
// Dequantize is a single-input / single-output (SISO) kernel, so both
// the sole input and the sole output live at index 0.
constexpr uint32_t inputTensorIdx = 0;
constexpr uint32_t outputTensorIdx = 0;

} // namespace
36
37// NOTE: doesnt currently support dynamic shapes
38OMStatus onert_micro::execute::execute_kernel_CircleDequantize(const OMExecuteArgs &execute_args)
39{
40 const circle::Tensor *input = nullptr;
41 const circle::Tensor *output = nullptr;
42
43 uint8_t *input_data = nullptr;
44 uint8_t *output_data = nullptr;
45
46 SISOHeader(execute_args, &input, &output, &input_data, &output_data);
47
48 assert(output->type() == circle::TensorType_FLOAT32);
49
50 OMStatus status = Ok;
51 switch (input->type())
52 {
53#ifndef DIS_FLOAT
54 case circle::TensorType_INT8:
55 {
56 assert(input->quantization() != nullptr);
57 assert(input->quantization()->scale() != nullptr and
58 input->quantization()->scale()->size() == 1);
59 assert(input->quantization()->zero_point() != nullptr and
60 input->quantization()->zero_point()->size() == 1);
62 params.zero_point = input->quantization()->zero_point()->operator[](0);
63 params.scale = input->quantization()->scale()->operator[](0);
64
65 status = pal::Dequantize(params, core::OMRuntimeShape(input).flatSize(),
66 core::utils::castInputData<int8_t>(input_data),
67 core::utils::castOutputData<float>(output_data));
68 }
69 break;
70#endif // DIS_FLOAT
71 default:
72 {
73 status = UnsupportedType;
74 assert(false && "Unsupported type.");
75 }
76 }
77
78 return status;
79}
constexpr uint32_t outputTensorIdx
list input_data
Definition infer.py:29
OMStatus Dequantize(const core::QuantizationParams op_params, const uint32_t flat_size, const InputT *input_data, OutputT *output_data)
OMStatus SISOHeader(const OMExecuteArgs &execute_args, const circle::Tensor **input, const circle::Tensor **output, uint8_t **input_data, uint8_t **output_data)
Definition OMUtils.cpp:159
@ UnsupportedType
Definition OMStatus.h:26