ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Split.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "OMStatus.h"
18
19#include "core/OMUtils.h"
20#include "core/OMKernelData.h"
21
23#include "execute/OMUtils.h"
25
26#include "PALSplit.h"
27
28using namespace onert_micro;
29using namespace onert_micro::core;
30using namespace onert_micro::execute;
31
namespace
{

// Tensor positions inside the Circle SPLIT operator:
// input 0 is the scalar split axis, input 1 is the tensor being split,
// and output 0 is the first of the `num_splits` result tensors.
constexpr uint32_t axisTensorIdx = 0;
constexpr uint32_t inputTensorIdx = 1;
constexpr uint32_t outputTensorIdx = 0;

} // namespace
40
41OMStatus onert_micro::execute::execute_kernel_CircleSplit(const OMExecuteArgs &execute_args)
42{
43 core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
44 core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
45 uint16_t op_index = execute_args.kernel_index;
46
47 const circle::Tensor *input;
48 const circle::Tensor *axis;
49 const circle::Tensor *output;
50
51 uint8_t *input_data;
52 uint8_t *axis_data;
53
54 // Read kernel
55 const circle::SplitOptions *options;
56
57 core::SplitParams params{};
58 {
59 execute::OMRuntimeKernel runtime_kernel;
60 OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
61 if (status != Ok)
62 return status;
63
64 input = runtime_kernel.inputs[inputTensorIdx];
65 axis = runtime_kernel.inputs[axisTensorIdx];
66 output = runtime_kernel.outputs[outputTensorIdx];
67 assert(input != nullptr);
68 assert(axis != nullptr);
69 assert(output != nullptr);
70
71 status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
72 if (status != Ok)
73 return status;
74
75 input_data = runtime_kernel.inputs_data[inputTensorIdx];
76 axis_data = runtime_kernel.inputs_data[axisTensorIdx];
77 assert(input_data != nullptr);
78 assert(axis_data != nullptr);
79 options = runtime_kernel.first_operator->builtin_options_as_SplitOptions();
80
81 params.num_outputs = options->num_splits();
82
83 for (uint32_t i = 0; i < params.num_outputs; ++i)
84 {
85 params.output_data[i] = runtime_kernel.outputs_data[i];
86 }
87 }
88 OMStatus status;
89 OMRuntimeShape axis_shape(axis);
90 OMRuntimeShape input_shape(input);
92
93 int32_t axis_value = utils::castInputData<int32_t>(axis_data)[0];
94 if (axis_value < 0)
95 {
96 axis_value += input_shape.dimensionsCount() + 1;
97 }
98
99 switch (input->type())
100 {
101#ifndef DIS_FLOAT
102 case circle::TensorType_FLOAT32:
103 status = pal::Split<float>(params, input_shape, core::utils::castInputData<float>(input_data),
104 output_shape, axis_value);
105 break;
106#endif // DIS_FLOAT
107 default:
108 {
109 status = UnsupportedActivation;
110 assert(false && "Unsupported type.");
111 }
112 }
113
114 return status;
115}
uint8_t * outputs_data[maxOutputSize]
const circle::Operator * first_operator
OMStatus getDataFromStorage(uint16_t op_index, core::OMRuntimeStorage &storage, core::OMRuntimeContext &context)
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
const luci_interpreter::RuntimeShape output_shape
constexpr uint32_t outputTensorIdx
list input_data
Definition infer.py:29
@ UnsupportedActivation
Definition OMStatus.h:28
core::OMRuntimeContext & runtime_context
core::OMRuntimeStorage & runtime_storage