ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Pack.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "OMStatus.h"

#include "core/OMUtils.h"
#include "core/OMRuntimeShape.h"

#include "execute/OMKernelExecutionBuilder.h"
#include "execute/OMRuntimeKernel.h"

25using namespace onert_micro;
26using namespace onert_micro::execute;
27
28namespace
29{
30
31template <typename T> void packImpl(OMRuntimeKernel &runtime_kernel)
32{
33 const auto *options = runtime_kernel.first_operator->builtin_options_as_PackOptions();
34
35 core::OMRuntimeShape input_shape(runtime_kernel.inputs[0]);
36 core::OMRuntimeShape output_shape(runtime_kernel.outputs[0]);
37
38 const int values_count = options->values_count();
39 int axis = options->axis();
40 const int dimensions = output_shape.dimensionsCount();
41
42 if (axis < 0)
43 {
44 axis += dimensions;
45 }
46
47 int outer_size = 1;
48 for (int i = 0; i < axis; ++i)
49 outer_size *= output_shape.dims(i);
50
51 int copy_size = 1;
52 for (int i = axis + 1; i < dimensions; ++i)
53 copy_size *= output_shape.dims(i);
54
55 int input_size = 1;
56 for (int i = 0; i < input_shape.dimensionsCount(); ++i)
57 input_size *= input_shape.dims(i);
58
59 assert(input_size == copy_size * outer_size);
60
61 T *output_data = core::utils::castOutputData<T>(runtime_kernel.outputs_data[0]);
62 assert(output_data != nullptr);
63
64 for (int i = 0; i < values_count; ++i)
65 {
66 auto input_data = core::utils::castInputData<T>(runtime_kernel.inputs_data[i]);
67 assert(input_data != nullptr);
68 for (int k = 0; k < outer_size; ++k)
69 {
70 const T *input_ptr = input_data + copy_size * k;
71 int loc = k * values_count * copy_size + i * copy_size;
72 T *output_ptr = output_data + loc;
73 for (int j = 0; j < copy_size; ++j)
74 output_ptr[j] = input_ptr[j];
75 }
76 }
77}
78
79} // namespace
80
81OMStatus onert_micro::execute::execute_kernel_CirclePack(const OMExecuteArgs &execute_args)
82{
83 core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
84 core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
85 uint16_t op_index = execute_args.kernel_index;
86
87 execute::OMRuntimeKernel runtime_kernel;
88 runtime_kernel.readKernel(op_index, runtime_context);
89
90 const auto type = runtime_kernel.inputs[0]->type();
91 OMStatus status = Ok;
92
93 status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
94
95 if (status != Ok)
96 return status;
97
98 switch (type)
99 {
100#ifndef DIS_FLOAT
101 case circle::TensorType_FLOAT32:
102 packImpl<float>(runtime_kernel);
103 break;
104#endif // DIS_FLOAT
105#ifndef DIS_QUANT
106 case circle::TensorType_INT8:
107 packImpl<int8_t>(runtime_kernel);
108 break;
109#endif // DIS_QUANT
110 case circle::TensorType_INT32:
111 packImpl<int32_t>(runtime_kernel);
112 break;
113 case circle::TensorType_INT64:
114 packImpl<int64_t>(runtime_kernel);
115 break;
116 default:
117 assert(false && "Unsupported type.");
118 status = UnsupportedType;
119 }
120
121 return status;
122}
int32_t dimensionsCount() const
Definition Tensor.h:106
int32_t dims(int i) const
Definition Tensor.h:108
uint8_t * outputs_data[maxOutputSize]
const circle::Operator * first_operator
OMStatus getDataFromStorage(uint16_t op_index, core::OMRuntimeStorage &storage, core::OMRuntimeContext &context)
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
const luci_interpreter::RuntimeShape output_shape
list input_data
Definition infer.py:29
type
Definition infer.py:18
@ UnsupportedType
Definition OMStatus.h:26
core::OMRuntimeContext & runtime_context
core::OMRuntimeStorage & runtime_storage