ONE - On-device Neural Engine
Loading...
Searching...
No Matches
FullyConnected.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "OMStatus.h"

#include "core/OMUtils.h"
#include "core/OMKernelData.h"

#include "import/OMKernelConfigureBuilder.h"
#include "execute/OMRuntimeKernel.h"

using namespace onert_micro;
using namespace onert_micro::core;
28
29namespace
30{
31
32constexpr uint32_t inputTensorIdx = 0;
33constexpr uint32_t weightTensorIdx = 1;
34constexpr uint32_t biasTensorIdx = 2;
35
36constexpr uint32_t outputTensorIdx = 0;
37
38} // namespace
39
41onert_micro::import::configure_kernel_CircleFullyConnected(const OMConfigureArgs &config_args)
42{
43
44 OMRuntimeContext &runtime_context = config_args.runtime_context;
45 uint16_t op_index = config_args.kernel_index;
46 OMRuntimeStorage &runtime_storage = config_args.runtime_storage;
47
48 execute::OMRuntimeKernel runtime_kernel;
49 runtime_kernel.readKernel(op_index, runtime_context);
50
51 const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
52 const circle::Tensor *weight = runtime_kernel.inputs[weightTensorIdx];
53 const circle::Tensor *bias = runtime_kernel.inputs[biasTensorIdx];
54 const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];
55
56 assert(input != nullptr);
57 assert(weight != nullptr);
58 // Bias can be nullptr
59 assert(output != nullptr);
60
61 OMStatus status = Ok;
62
63#ifndef DIS_FLOAT
64 if (weight->type() == circle::TensorType_FLOAT32)
65 {
66
67 status = utils::checkCondition(input->type() == circle::TensorType_FLOAT32 and
68 output->type() == circle::TensorType_FLOAT32 and
69 (!bias or bias->type() == circle::TensorType_FLOAT32));
70 if (status != Ok)
71 return status;
72 }
73#endif // DIS_FLOAT
74#ifndef DIS_QUANT
75 if (weight->type() == circle::TensorType_UINT8)
76 {
77
78 status = utils::checkCondition(input->type() == circle::TensorType_UINT8 and
79 output->type() == circle::TensorType_UINT8 and
80 (!bias or bias->type() == circle::TensorType_INT32));
81 if (status != Ok)
82 return status;
83 }
84 else if (weight->type() == circle::TensorType_INT8)
85 {
86 status = utils::checkCondition(input->type() == circle::TensorType_INT8 or
87 input->type() == circle::TensorType_FLOAT32);
88 if (status != Ok)
89 return status;
90
91 status = utils::checkCondition(output->type() == circle::TensorType_INT8 or
92 output->type() == circle::TensorType_FLOAT32);
93 if (status != Ok)
94 return status;
95
96 status = utils::checkCondition(!bias or bias->type() == circle::TensorType_INT32 or
97 bias->type() == circle::TensorType_INT64 or
98 bias->type() == circle::TensorType_FLOAT32);
99 if (status != Ok)
100 return status;
101
102 if (input->type() == circle::TensorType_FLOAT32)
103 {
104 // hybrid mode
105 // Check it is channel wise quantization
106 status = utils::checkCondition(weight->quantization() != nullptr and
107 weight->quantization()->scale() != nullptr);
108 if (status != Ok)
109 return status;
110 }
111 }
112 else if (weight->type() == circle::TensorType_INT16)
113 {
114
115 status = utils::checkCondition(input->type() == circle::TensorType_INT16 and
116 output->type() == circle::TensorType_INT16 and
117 (!bias or bias->type() == circle::TensorType_INT32));
118 if (status != Ok)
119 return status;
120 }
121#endif // DIS_QUANT
122
123 core::OMRuntimeShape weight_shape(weight);
124 core::OMRuntimeShape bias_shape(bias);
125 core::OMRuntimeShape input_shape(input);
127
128 status = utils::checkCondition(weight_shape.dimensionsCount() == 2);
129 if (status != Ok)
130 return status;
131
132 if (input_shape.flatSize() == 1 and output_shape.flatSize() != 1)
133 {
134#ifndef DIS_DYN_SHAPES
135 input_shape =
136 runtime_storage.getDynamicRuntimeShape(runtime_kernel.inputs_index[inputTensorIdx]);
137 if (input_shape.flatSize() == 0)
139#else
141#endif // DIS_DYN_SHAPES
142 }
143
144 status = utils::checkCondition(bias == nullptr or weight_shape.dims(0) == bias_shape.flatSize());
145
146 if (input->type() == circle::TensorType_FLOAT32)
147 return status;
148
149#ifndef DIS_QUANT
150
151 // Check quantized version
152 if (input->quantization() == nullptr or output->quantization() == nullptr or
153 weight->quantization() == nullptr)
154 return NoQuantization;
155
156 if (output->quantization()->scale() == nullptr or output->quantization()->scale()->size() != 1)
158
159 if (output->quantization()->zero_point() == nullptr or
160 output->quantization()->zero_point()->size() != 1)
162
163 if (weight->quantization()->scale() == nullptr or weight->quantization()->scale()->size() != 1)
165
166 if (weight->quantization()->zero_point() == nullptr or
167 weight->quantization()->zero_point()->size() != 1)
169
170#endif // DIS_QUANT
171
172 return status;
173}
OMRuntimeShape getDynamicRuntimeShape(uint16_t tensor_index)
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
const luci_interpreter::RuntimeShape output_shape
constexpr uint32_t outputTensorIdx
@ UnsupportedQuantizationType
Definition OMStatus.h:27
@ NoQuantization
Definition OMStatus.h:33
@ UnsupportedDynamicShapeCase
Definition OMStatus.h:34