ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Pad.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "OMStatus.h"

#include "core/OMUtils.h"
#include "core/OMRuntimeShape.h"

#include "execute/OMKernelExecutionBuilder.h"
#include "execute/OMRuntimeKernel.h"

#include "PALPad.h"
27using namespace onert_micro;
28using namespace onert_micro::execute;
29
namespace
{

// Flatbuffer I/O slots of the circle Pad / PadV2 operator:
//   input 0 - tensor to be padded
//   input 1 - paddings tensor, read below as int32 (before, after) pairs per dimension
//   input 2 - optional pad-value tensor; may be absent (nullptr) in which case zero is used
//   output 0 - padded result
constexpr uint32_t input1TensorIdx = 0;
constexpr uint32_t input2TensorIdx = 1;
constexpr uint32_t input3TensorIdx = 2;
constexpr uint32_t outputTensorIdx = 0;

} // namespace
39
40namespace onert_micro
41{
42namespace execute
43{
44
46{
47 core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
48 core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
49 uint16_t op_index = execute_args.kernel_index;
50
51 const circle::Tensor *input1;
52 const circle::Tensor *input2;
53 const circle::Tensor *input3;
54 const circle::Tensor *output;
55
56 uint8_t *input1_data;
57 uint8_t *input2_data;
58 uint8_t *input3_data;
59 uint8_t *output_data;
60
61 const circle::PadOptions *options;
62 // Read kernel
63 {
64 execute::OMRuntimeKernel runtime_kernel;
65 runtime_kernel.readKernel(op_index, runtime_context);
66
67 input1 = runtime_kernel.inputs[input1TensorIdx];
68 input2 = runtime_kernel.inputs[input2TensorIdx];
69 input3 = runtime_kernel.inputs[input3TensorIdx];
70 output = runtime_kernel.outputs[outputTensorIdx];
71 assert(input1 != nullptr);
72 assert(input2 != nullptr);
73 // input3 - can be nullptr
74 assert(output != nullptr);
75
76 runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
77
78 input1_data = runtime_kernel.inputs_data[input1TensorIdx];
79 input2_data = runtime_kernel.inputs_data[input2TensorIdx];
80 input3_data = runtime_kernel.inputs_data[input3TensorIdx];
81 output_data = runtime_kernel.outputs_data[outputTensorIdx];
82 assert(input1_data != nullptr);
83 assert(input2_data != nullptr);
84 // input3_data can be nullptr
85 assert(output_data != nullptr);
86
87 options = runtime_kernel.first_operator->builtin_options_as_PadOptions();
88 }
89
90 OMStatus status = Ok;
91
92 core::OMRuntimeShape input1_shape(input1);
93 core::OMRuntimeShape input2_shape(input2);
95
96 // Create PadParams
97 core::PadParams pad_params{};
98 const auto num_input_dimensions = input1_shape.dimensionsCount();
99 assert(num_input_dimensions <= 5);
100
101 if (num_input_dimensions > 5)
102 return UnsupportedType;
103
104 pad_params.left_padding_count = num_input_dimensions;
105 pad_params.right_padding_count = num_input_dimensions;
106
107 auto *paddings_data = reinterpret_cast<int32_t *>(input2_data);
108 for (int idx = num_input_dimensions - 1; idx >= 0; --idx)
109 {
110 pad_params.left_padding[idx] = paddings_data[idx * 2];
111 pad_params.right_padding[idx] = paddings_data[idx * 2 + 1];
112 }
113
114 switch (input1->type())
115 {
116#ifndef DIS_FLOAT
117 case circle::TensorType_FLOAT32:
118 {
119 float pad_value = input3_data == nullptr ? 0.f : *reinterpret_cast<float *>(input3_data[0]);
120 status = pal::Pad(pad_params, input1_shape, core::utils::castInputData<float>(input1_data),
121 pad_value, output_shape, core::utils::castOutputData<float>(output_data));
122 }
123 break;
124#endif // DIS_FLOAT
125#ifndef DIS_QUANT
126 case circle::TensorType_INT8:
127 {
128 // TODO CWQ quantization
129 // TODO non_zero padding
131 (*input1->quantization()->scale())[0],
132 static_cast<int32_t>((*input1->quantization()->zero_point())[0])};
134 (*output->quantization()->scale())[0],
135 static_cast<int32_t>((*output->quantization()->zero_point())[0])};
136
137 status = pal::QuantizedZeroPad(pad_params, input1_shape, in_qparams,
138 core::utils::castInputData<int8_t>(input1_data), output_shape,
139 out_qparams, core::utils::castOutputData<int8_t>(output_data));
140 }
141 break;
142#endif // DIS_QUANT
143 default:
144 {
145 status = UnsupportedType;
146 assert(false && "Unsupported type");
147 }
148 }
149
150 return status;
151}
152
153} // namespace execute
154} // namespace onert_micro
size_t dimensionsCount() const noexcept
uint8_t * outputs_data[maxOutputSize]
const circle::Operator * first_operator
OMStatus getDataFromStorage(uint16_t op_index, core::OMRuntimeStorage &storage, core::OMRuntimeContext &context)
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
const luci_interpreter::RuntimeShape output_shape
constexpr uint32_t input1TensorIdx
constexpr uint32_t outputTensorIdx
constexpr uint32_t input2TensorIdx
OMStatus Pad(const core::PadParams &op_params, const core::OMRuntimeShape &input_shape, const float *input_data, const float pad_value, const core::OMRuntimeShape &output_shape, float *output_data)
Definition PALPad.h:35
OMStatus QuantizedZeroPad(const core::PadParams &op_params, const core::OMRuntimeShape &input_shape, const onert_micro::core::QuantizationParams &input_qparams, const T *input_data, const core::OMRuntimeShape &output_shape, const onert_micro::core::QuantizationParams &output_qparams, T *output_data)
Definition PALPad.h:115
OMStatus execute_kernel_CirclePad(const OMExecuteArgs &execute_args)
Definition Pad.cpp:45
@ UnsupportedType
Definition OMStatus.h:26
core::OMRuntimeContext & runtime_context
core::OMRuntimeStorage & runtime_storage