ONE - On-device Neural Engine
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
Slice.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "OMStatus.h"
18
19#include "core/OMUtils.h"
20#include "core/OMKernelData.h"
21
23#include "execute/OMUtils.h"
25
26#include "PALSlice.h"
27
28using namespace onert_micro;
29using namespace onert_micro::core;
30using namespace onert_micro::execute;
31
namespace
{

// The PAL Slice implementation operates on a fixed rank of 5; all real
// shapes are right-aligned into MAX_DIM-sized begin/size arrays.
const int MAX_DIM = 5;

// Circle Slice operator input ordering: data tensor, begin indices, sizes.
constexpr uint32_t input1TensorIdx = 0;
constexpr uint32_t input2TensorIdx = 1;
constexpr uint32_t input3TensorIdx = 2;

constexpr uint32_t outputTensorIdx = 0;

// Copy `dimensions` begin/size values into the MAX_DIM-sized `begins`/`sizes`
// arrays, right-aligned: the leading (MAX_DIM - dimensions) entries keep the
// defaults the caller pre-filled. `begins` and `sizes` must each hold at
// least MAX_DIM elements.
void getBeginAndSizeVectors(int dimensions, const int32_t *begin_data, const int32_t *size_data,
                            int32_t *begins, int32_t *sizes)
{
  // Guard: a rank above MAX_DIM would make `offset` negative and write
  // out of bounds below.
  assert(dimensions <= MAX_DIM);
  int offset = MAX_DIM - dimensions;
  for (int idx = 0; idx < dimensions; ++idx)
  {
    begins[offset + idx] = begin_data[idx];
    sizes[offset + idx] = size_data[idx];
  }
}

} // namespace
55
// NOTE: doesn't currently support dynamic shapes
57namespace onert_micro
58{
59namespace execute
60{
61
63{
64 core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
65 core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
66 uint16_t op_index = execute_args.kernel_index;
67
68 const circle::Tensor *input1 = nullptr;
69 const circle::Tensor *input2 = nullptr;
70 const circle::Tensor *input3 = nullptr;
71
72 const circle::Tensor *output = nullptr;
73
74 uint8_t *input1_data;
75 const int32_t *input2_data;
76 const int32_t *input3_data;
77 uint8_t *output_data;
78
79 OMStatus status = Ok;
80 const circle::SliceOptions *options;
81 // Read kernel
82 {
83 execute::OMRuntimeKernel runtime_kernel;
84 runtime_kernel.readKernel(op_index, runtime_context);
85
86 input1 = runtime_kernel.inputs[input1TensorIdx];
87 input2 = runtime_kernel.inputs[input2TensorIdx];
88 input3 = runtime_kernel.inputs[input3TensorIdx];
89
90 output = runtime_kernel.outputs[outputTensorIdx];
91 assert(input1 != nullptr);
92 assert(input2 != nullptr);
93 assert(input3 != nullptr);
94 assert(output != nullptr);
95
96 status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
97 if (status != Ok)
98 return status;
99
100 input1_data = runtime_kernel.inputs_data[input1TensorIdx];
101 input2_data = utils::castInputData<int32_t>(runtime_kernel.inputs_data[input2TensorIdx]);
102 input3_data = utils::castInputData<int32_t>(runtime_kernel.inputs_data[input3TensorIdx]);
103 output_data = runtime_kernel.outputs_data[outputTensorIdx];
104
105 assert(input1_data != nullptr);
106 assert(input2_data != nullptr);
107 assert(input3_data != nullptr);
108 assert(output_data != nullptr);
109
110 options = runtime_kernel.first_operator->builtin_options_as_SliceOptions();
111 }
112
113 OMRuntimeShape input_shape(input1);
114
115 SliceParams op_params{};
116 op_params.begin_count = MAX_DIM;
117 op_params.size_count = MAX_DIM;
118 for (int i = 0; i < MAX_DIM; i++)
119 {
120 op_params.begin[i] = 0;
121 op_params.size[i] = 1;
122 }
123 auto num_dim = input_shape.dimensionsCount();
124
125 getBeginAndSizeVectors(num_dim, input2_data, input3_data, op_params.begin, op_params.size);
126
127 switch (input1->type())
128 {
129#ifndef DIS_FLOAT
130 case circle::TensorType_FLOAT32:
131 {
132 status = pal::Slice(op_params, input_shape, utils::castInputData<float>(input1_data),
133 utils::castOutputData<float>(output_data));
134 }
135 break;
136#endif // DIS_FLOAT
137 case circle::TensorType_INT32:
138 {
139 status = pal::Slice(op_params, input_shape, utils::castInputData<int32_t>(input1_data),
140 utils::castOutputData<int32_t>(output_data));
141 }
142 break;
143 case circle::TensorType_INT64:
144 {
145 status = pal::Slice(op_params, input_shape, utils::castInputData<int64_t>(input1_data),
146 utils::castOutputData<int64_t>(output_data));
147 }
148 break;
149 default:
150 {
151 status = UnsupportedActivation;
152 assert(false && "Unsupported type.");
153 }
154 }
155
156 return status;
157}
158
159} // namespace execute
160} // namespace onert_micro
uint8_t * outputs_data[maxOutputSize]
const circle::Operator * first_operator
OMStatus getDataFromStorage(uint16_t op_index, core::OMRuntimeStorage &storage, core::OMRuntimeContext &context)
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
__global uchar * offset(const Image *img, int x, int y)
Definition helpers.h:540
constexpr uint32_t input1TensorIdx
constexpr uint32_t outputTensorIdx
constexpr uint32_t input2TensorIdx
void getBeginAndSizeVectors(int dimensions, const Tensor *begin, const Tensor *size, std::vector< int > *begins, std::vector< int > *sizes)
Definition Slice.cpp:64
OMStatus Slice(const core::SliceParams &op_params, const core::OMRuntimeShape &input_shape, const T *input_data, T *output_data)
Definition PALSlice.h:29
OMStatus execute_kernel_CircleSlice(const OMExecuteArgs &execute_args)
Definition Slice.cpp:62
@ UnsupportedActivation
Definition OMStatus.h:28
core::OMRuntimeContext & runtime_context
core::OMRuntimeStorage & runtime_storage