ONE - On-device Neural Engine
Slice.cpp
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Builders.h"
#include "kernels/Utils.h"
#include "MISOKernel.h"

#include <cassert>

namespace luci_interpreter
{

namespace
{
const int max_dim = 5;

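// Slice parameters: per-dimension begin offsets and sizes, stored front-padded
// up to max_dim (5) entries.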
struct SliceParams
{
  int8_t begin_count;
  int32_t begin[5];
  int8_t size_count;
  int32_t size[5];
};

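// Copies the requested sub-tensor from input_data to output_data. The input shape is
// extended to 5 dimensions so one set of nested loops handles every rank; a size of -1
// means "to the end of that dimension". For example, an input of shape {2, 3} with
// begin {0, 1} and size {-1, 2} yields the 2x2 block starting at column 1.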
template <typename T>
inline void slice(const luci_interpreter::SliceParams &op_params,
                  const luci_interpreter::RuntimeShape &input_shape, const T *input_data,
                  const luci_interpreter::RuntimeShape &output_shape, T *output_data)
{
  const luci_interpreter::RuntimeShape ext_shape =
    luci_interpreter::RuntimeShape::extendedShape(5, input_shape);
  const int begin_count = op_params.begin_count;
  const int size_count = op_params.size_count;
  // We front-pad the begin and size vectors.
  int start[5];
  int stop[5];
  for (int i = 0; i < 5; ++i)
  {
    int padded_i = 5 - i;
    start[i] = begin_count < padded_i ? 0 : op_params.begin[begin_count - padded_i];
    stop[i] = (size_count < padded_i || op_params.size[size_count - padded_i] == -1)
                ? ext_shape.dims(i)
                : start[i] + op_params.size[size_count - padded_i];
  }

  for (int i0 = start[0]; i0 < stop[0]; ++i0)
  {
    for (int i1 = start[1]; i1 < stop[1]; ++i1)
    {
      for (int i2 = start[2]; i2 < stop[2]; ++i2)
      {
        for (int i3 = start[3]; i3 < stop[3]; ++i3)
        {
          for (int i4 = start[4]; i4 < stop[4]; ++i4)
          {
            auto position =
              (((i0 * ext_shape.dims(1) + i1) * ext_shape.dims(2) + i2) * ext_shape.dims(3) + i3) *
                ext_shape.dims(4) +
              i4;
            *output_data++ = input_data[position];
          }
        }
      }
    }
  }
}

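// Reads the raw begin and size tensor data (interpreted as T, i.e. int32_t or int64_t)
// and writes it into the trailing `dimensions` entries of the front-padded int32 arrays.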
template <typename T>
void getBeginAndSizeVectors(int dimensions, const uint8_t *begin_data, const uint8_t *size_data,
                            int32_t *begins, int32_t *sizes)
{
  int offset = max_dim - dimensions;
  for (int idx = 0; idx < dimensions; ++idx)
  {
    begins[offset + idx] = kernels::getTensorData<T>(begin_data)[idx];
    sizes[offset + idx] = kernels::getTensorData<T>(size_data)[idx];
  }
}
} // namespace

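// Validates the Slice operator: input and output element types must match, begin/size
// must be 1-D S32 or S64 tensors, and the input rank may not exceed max_dim.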
void configure_kernel_CircleSlice(const circle::Operator *cur_op, BaseRuntimeGraph *runtime_graph)
{
  kernels::MISOKernel kernel(cur_op, runtime_graph);

  LUCI_INTERPRETER_CHECK(Tensor::element_type(kernel.input1()) ==
                         Tensor::element_type(kernel.output()));
  LUCI_INTERPRETER_CHECK(Tensor::element_type(kernel.input2()) == DataType::S32 ||
                         Tensor::element_type(kernel.input2()) == DataType::S64);
  LUCI_INTERPRETER_CHECK(Tensor::element_type(kernel.input3()) == DataType::S32 ||
                         Tensor::element_type(kernel.input3()) == DataType::S64);
  LUCI_INTERPRETER_CHECK(Tensor::num_dims(kernel.input2()) == 1);
  LUCI_INTERPRETER_CHECK(Tensor::num_dims(kernel.input3()) == 1);
  LUCI_INTERPRETER_CHECK(Tensor::num_dims(kernel.input1()) <= max_dim);
}

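// Runs the Slice kernel: reads the begin/size values, resizes the output buffer when
// the shape is dynamic, and copies the selected region of the input tensor.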
void execute_kernel_CircleSlice(const circle::Operator *cur_op, BaseRuntimeGraph *runtime_graph)
{
  kernels::MISOKernel kernel(cur_op, runtime_graph);

  bool is_dynamic_shapes = false;

  const circle::Tensor *input = kernel.input1();
  const circle::Tensor *begin = kernel.input2();
  const circle::Tensor *size_tensor = kernel.input3();
  const circle::Tensor *output = kernel.output();

  const auto *input_data = runtime_graph->getDataByTensor(input);
  if (input_data == nullptr)
    input_data = runtime_graph->getConstDataByTensor(input);
  assert(input_data);

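  // Fetch begin/size data; if no runtime data is attached to the tensor, fall back to
  // constant tensor data and flag the output shape for recomputation below.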
  const auto *begin_data = runtime_graph->getDataByTensor(begin);
  if (begin_data == nullptr)
  {
    begin_data = runtime_graph->getConstDataByTensor(begin);
    is_dynamic_shapes = true;
  }
  assert(begin_data);

  const auto *size_data = runtime_graph->getDataByTensor(size_tensor);
  if (size_data == nullptr)
  {
    size_data = runtime_graph->getConstDataByTensor(size_tensor);
    is_dynamic_shapes = true;
  }
  assert(size_data);

  auto *output_data = runtime_graph->getDataByTensor(output);
  assert(output_data);

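  // Initialize all max_dim entries with neutral values (begin 0, size 1); the trailing
  // num_dim entries are then overwritten from the begin/size tensors.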
  SliceParams op_params{};
  op_params.begin_count = max_dim;
  op_params.size_count = max_dim;
  for (int i = 0; i < max_dim; i++)
  {
    op_params.begin[i] = 0;
    op_params.size[i] = 1;
  }
  auto num_dim = Tensor::num_dims(input);

  if (Tensor::element_type(begin) == DataType::S32)
  {
    getBeginAndSizeVectors<int32_t>(num_dim, begin_data, size_data, op_params.begin,
                                    op_params.size);
  }
  else if (Tensor::element_type(begin) == DataType::S64)
  {
    getBeginAndSizeVectors<int64_t>(num_dim, begin_data, size_data, op_params.begin,
                                    op_params.size);
  }
  else
  {
    assert(false && "Unsupported type");
  }

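  // With dynamic shapes the output size depends on the begin/size values: compute the
  // resulting shape, register it, and allocate a fresh output buffer (or reset the
  // output data and return early if the slice is empty).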
#ifndef DIS_DYN_SHAPES
  if (is_dynamic_shapes)
  {
    int32_t data_size = 1;
    luci_interpreter::RuntimeShape dynamic_shapes(max_dim - num_dim + 1);
    int offset = max_dim - Tensor::num_dims(input);
    for (int i = 0; i <= max_dim - num_dim; ++i)
    {
      if (i + offset > 4)
        return;
      auto cur_size = op_params.size[i + offset] != -1
                        ? op_params.size[i + offset]
                        : Tensor::dim(input, i) - op_params.begin[i + offset];
      data_size *= cur_size;

      dynamic_shapes.setDim(i, cur_size);
    }
    data_size *= size(Tensor::element_type(output));

    runtime_graph->addDynamicShapeTensor(output, std::move(dynamic_shapes));

    if (data_size == 0)
    {
      runtime_graph->resetTensorData(nullptr, output);
      return;
    }

    auto new_output_data = new uint8_t[data_size];
    output_data = new_output_data;
    runtime_graph->resetTensorData(new_output_data, output);
  }
#else
  assert(is_dynamic_shapes == false);
#endif // DIS_DYN_SHAPES

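  // Dispatch on the input element type; the float and quantized paths can be compiled
  // out with DIS_FLOAT / DIS_QUANT.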
  switch (Tensor::element_type(input))
  {
#ifndef DIS_FLOAT
    case DataType::FLOAT32:
      slice<float>(op_params, kernels::getTensorShape(input),
                   kernels::getTensorData<float>(input_data), kernels::getTensorShape(output),
                   kernels::getTensorData<float>(output_data));
      break;
#endif // DIS_FLOAT
#ifndef DIS_QUANT
    case DataType::U8:
      slice<uint8_t>(op_params, kernels::getTensorShape(input),
                     kernels::getTensorData<uint8_t>(input_data), kernels::getTensorShape(output),
                     kernels::getTensorData<uint8_t>(output_data));
      break;
    case DataType::S8:
      slice<int8_t>(op_params, kernels::getTensorShape(input),
                    kernels::getTensorData<int8_t>(input_data), kernels::getTensorShape(output),
                    kernels::getTensorData<int8_t>(output_data));
      break;
    case DataType::S16:
      slice<int16_t>(op_params, kernels::getTensorShape(input),
                     kernels::getTensorData<int16_t>(input_data), kernels::getTensorShape(output),
                     kernels::getTensorData<int16_t>(output_data));
      break;
#endif // DIS_QUANT
    default:
      assert(false && "Unsupported input type.");
  }
}

} // namespace luci_interpreter