ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Concatenation.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "import/OMKernelConfigureBuilder.h"
18#include "core/OMUtils.h"
19#include "OMStatus.h"
20#include "execute/OMRuntimeKernel.h"
21
22#include "core/OMDataType.h"
24using namespace onert_micro;
25using namespace onert_micro::core;
26
27namespace onert_micro
28{
29namespace import
30{
31
33{
34 OMRuntimeContext &runtime_context = config_args.runtime_context;
35 uint16_t op_index = config_args.kernel_index;
36
38
39 OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
40 if (status != Ok)
41 return status;
42
43 const int num_inputs = runtime_kernel.inputs_num;
44
45 const auto *t0 = runtime_kernel.inputs[0];
46 const auto *output = runtime_kernel.outputs[0];
47
48 const auto *params = runtime_kernel.first_operator->builtin_options_as_ConcatenationOptions();
49
50 // TODO: Support concat with fused activation function
51 if (params->fused_activation_function() != circle::ActivationFunctionType_NONE)
52 return UnknownError;
53
54 OMRuntimeShape input_shape(t0);
55 int axis = params->axis();
56 if (axis < 0)
57 axis += input_shape.dimensionsCount();
58
59 if (axis < 0 or axis > input_shape.dimensionsCount())
61
62 for (int i = 1; i < num_inputs; ++i)
63 {
64 const auto *tensor = runtime_kernel.inputs[i];
65 if (tensor->type() != t0->type())
67 }
68
69 if (t0->type() != circle::TensorType_INT8 and t0->type() != circle::TensorType_INT16)
70 return Ok;
71
72#ifndef DIS_QUANT
73 // If input tensors are INT8 or INT16 type then quantization parameters of all input tensors and
74 // the output should be the same
75 for (int i = 0; i < num_inputs; ++i)
76 {
77 const auto *tensor = runtime_kernel.inputs[i];
78
79 if (tensor->quantization() == nullptr)
81
82 if (tensor->quantization()->scale()->size() != 1)
84
85 if (tensor->quantization()->zero_point()->size() != 1)
87
88 if (*tensor->quantization()->scale()->begin() != *output->quantization()->scale()->begin())
90
91 if (*tensor->quantization()->zero_point()->begin() !=
92 *output->quantization()->zero_point()->begin())
94 }
95#endif // DIS_QUANT
96
97 return Ok;
98}
99
100} // namespace import
101} // namespace onert_micro
size_t dimensionsCount() const noexcept
const circle::Operator * first_operator
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
OMStatus configure_kernel_CircleConcatenation(const OMConfigureArgs &config_args)
@ FailedCheckCondition
Definition OMStatus.h:32
core::OMRuntimeContext & runtime_context