ONE - On-device Neural Engine
ExpandDims.cpp
/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "OMStatus.h"

#include "core/OMUtils.h"
#include "import/OMKernelConfigureBuilder.h"
#include "execute/OMRuntimeKernel.h"

using namespace onert_micro;
using namespace onert_micro::core;

namespace
{

constexpr uint32_t inputTensorIdx = 0;
constexpr uint32_t axisTensorIdx = 1;
constexpr uint32_t outputTensorIdx = 0;

} // namespace

namespace onert_micro
{
namespace import
{

OMStatus configure_kernel_CircleExpandDims(const OMConfigureArgs &config_args)
{
  OMRuntimeContext &runtime_context = config_args.runtime_context;
  uint16_t op_index = config_args.kernel_index;
  OMRuntimeStorage &runtime_storage = config_args.runtime_storage;

  execute::OMRuntimeKernel runtime_kernel;

  OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
  if (status != Ok)
    return status;

  const circle::Tensor *input = runtime_kernel.inputs[inputTensorIdx];
  const circle::Tensor *axis = runtime_kernel.inputs[axisTensorIdx];
  const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];

  assert(input != nullptr);
  assert(axis != nullptr);
  assert(output != nullptr);

  status = utils::checkCondition(input->type() == output->type());
  if (status != Ok)
    return status;

  OMRuntimeShape input_shape(input);

  status = utils::checkCondition(axis->type() == circle::TensorType_INT32 or
                                 axis->type() == circle::TensorType_INT64);
  if (status != Ok)
    return status;

  // Check axis value: a negative axis counts from the end; after normalization
  // it must lie in [0, rank], where rank is the input's number of dimensions.
  runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
  uint8_t *axis_data = runtime_kernel.inputs_data[axisTensorIdx];
  status = utils::checkCondition(axis_data != nullptr);
  if (status != Ok)
    return status;

  if (axis->type() == circle::TensorType_INT32)
  {
    int32_t axis_value = *reinterpret_cast<int32_t *>(axis_data);
    if (axis_value < 0)
    {
      axis_value += input_shape.dimensionsCount() + 1;
    }

    status = utils::checkCondition(axis_value <= input_shape.dimensionsCount() and axis_value >= 0);
    if (status != Ok)
      return status;
  }
  else
  {
    int64_t axis_value = *reinterpret_cast<int64_t *>(axis_data);
    if (axis_value < 0)
    {
      axis_value += input_shape.dimensionsCount() + 1;
    }

    status = utils::checkCondition(axis_value <= input_shape.dimensionsCount() and axis_value >= 0);
    if (status != Ok)
      return status;
  }

  return status;
}

} // namespace import
} // namespace onert_micro
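
For reference, the axis validation performed above reduces to a simple standalone rule: a negative axis is counted from the end of the shape, and after normalization the axis must lie in [0, rank], where rank itself is allowed because ExpandDims inserts a new dimension of size 1 at that position. The sketch below is illustrative only and not part of the onert-micro sources; the helper name is made up.

#include <cassert>
#include <cstdint>

// Hypothetical helper mirroring the check in configure_kernel_CircleExpandDims.
static bool isValidExpandDimsAxis(int32_t axis, int32_t input_rank)
{
  if (axis < 0)
    axis += input_rank + 1; // e.g. axis = -1 with rank 3 becomes 3 (append a new dim)
  return axis >= 0 && axis <= input_rank;
}

int main()
{
  assert(isValidExpandDimsAxis(-1, 3));  // appends a trailing dimension
  assert(isValidExpandDimsAxis(0, 3));   // prepends a leading dimension
  assert(!isValidExpandDimsAxis(4, 3));  // out of range: rank is only 3
  assert(!isValidExpandDimsAxis(-5, 3)); // still negative after normalization
  return 0;
}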