ONE - On-device Neural Engine
Transpose.cpp
/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "OMStatus.h"

#include "core/OMUtils.h"
#include "core/OMKernelData.h"

#include "execute/OMUtils.h"
#include "execute/OMRuntimeKernel.h"

#include "PALTranspose.h"
using namespace onert_micro;
using namespace onert_micro::core;
using namespace onert_micro::execute;

namespace
{
constexpr int kInputTensorIdx = 0;
constexpr int kPermTensorIdx = 1;
constexpr int kOutputTensorIdx = 0;

} // namespace

namespace onert_micro
{
namespace execute
{

OMStatus execute_kernel_CircleTranspose(const OMExecuteArgs &execute_args)
{
  core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
  core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
  uint16_t op_index = execute_args.kernel_index;

  const circle::Tensor *input;
  const circle::Tensor *perm;
  const circle::Tensor *output;

  uint8_t *input_data;
  uint8_t *perm_data;
  uint8_t *output_data;

  // Read kernel
  {
    execute::OMRuntimeKernel runtime_kernel;
    OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
    if (status != Ok)
      return status;

    input = runtime_kernel.inputs[kInputTensorIdx];
    perm = runtime_kernel.inputs[kPermTensorIdx];
    output = runtime_kernel.outputs[kOutputTensorIdx];
    assert(input != nullptr);
    assert(perm != nullptr);
    assert(output != nullptr);

    status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
    if (status != Ok)
      return status;

    input_data = runtime_kernel.inputs_data[kInputTensorIdx];
    perm_data = runtime_kernel.inputs_data[kPermTensorIdx];
    output_data = runtime_kernel.outputs_data[kOutputTensorIdx];
    assert(input_data != nullptr);
    assert(perm_data != nullptr);
    assert(output_data != nullptr);
  }
  OMStatus status;
  OMRuntimeShape perm_shape(perm);
  OMRuntimeShape input_shape(input);
  OMRuntimeShape output_shape(output);

  // Every permutation entry must name a valid axis of the input tensor
  for (int idx = 0; idx < input_shape.dimensionsCount(); ++idx)
    assert(reinterpret_cast<int32_t *>(perm_data)[idx] >= 0 and
           reinterpret_cast<int32_t *>(perm_data)[idx] < input_shape.dimensionsCount());

  // Copy the permutation into the kernel params consumed by the PAL backend
  core::TransposeParams params;
  params.perm_count = perm_shape.dims(0);
  for (int i = 0; i < params.perm_count; ++i)
    params.perm[i] = reinterpret_cast<int32_t *>(perm_data)[i];
  switch (input->type())
  {
#ifndef DIS_FLOAT
    case circle::TensorType_FLOAT32:
      status = pal::Transpose<float>(params, input_shape, reinterpret_cast<float *>(input_data),
                                     output_shape, reinterpret_cast<float *>(output_data));
      break;
#endif // DIS_FLOAT
    default:
    {
      status = UnsupportedActivation;
      assert(false && "Unsupported type.");
    }
  }

  return status;
}

} // namespace execute
} // namespace onert_micro
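
The element shuffling itself is delegated to pal::Transpose from PALTranspose.h, which is backend-specific and not shown on this page. As a rough mental model only, the self-contained sketch below illustrates the index-permutation copy such a kernel is generally expected to perform; the name reference_transpose, the fixed rank-4 limit, and the row-major layout are illustrative assumptions, not the actual PAL implementation.

#include <array>
#include <cstdint>
#include <vector>

// Illustrative only: a generic rank-4 transpose. perm[d] names the input axis
// that becomes output axis d, so out_dims[d] == in_dims[perm[d]].
template <typename T>
void reference_transpose(const std::array<int32_t, 4> &in_dims,
                         const std::array<int32_t, 4> &perm,
                         const std::vector<T> &input, std::vector<T> &output)
{
  std::array<int32_t, 4> out_dims;
  for (int d = 0; d < 4; ++d)
    out_dims[d] = in_dims[perm[d]];

  // Walk the output in row-major order and gather from the permuted input position.
  for (int32_t o0 = 0; o0 < out_dims[0]; ++o0)
    for (int32_t o1 = 0; o1 < out_dims[1]; ++o1)
      for (int32_t o2 = 0; o2 < out_dims[2]; ++o2)
        for (int32_t o3 = 0; o3 < out_dims[3]; ++o3)
        {
          const std::array<int32_t, 4> out_idx{o0, o1, o2, o3};
          std::array<int32_t, 4> in_idx;
          for (int d = 0; d < 4; ++d)
            in_idx[perm[d]] = out_idx[d];

          const int32_t in_offset =
            ((in_idx[0] * in_dims[1] + in_idx[1]) * in_dims[2] + in_idx[2]) * in_dims[3] +
            in_idx[3];
          const int32_t out_offset =
            ((out_idx[0] * out_dims[1] + out_idx[1]) * out_dims[2] + out_idx[2]) * out_dims[3] +
            out_idx[3];
          output[out_offset] = input[in_offset];
        }
}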
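
Continuing the sketch above, a small hypothetical check of the permutation semantics forwarded through params.perm: transposing the 2x3 matrix [[1, 2, 3], [4, 5, 6]] with its last two axes swapped yields the 3x2 matrix [[1, 4], [2, 5], [3, 6]].

#include <cassert>

int main()
{
  // 2x3 input viewed as rank-4 [1, 1, 2, 3]; perm {0, 1, 3, 2} swaps the last two axes.
  const std::vector<float> input = {1, 2, 3, 4, 5, 6};
  std::vector<float> output(6);
  reference_transpose<float>({1, 1, 2, 3}, {0, 1, 3, 2}, input, output);

  // Row-major result of the transposed matrix [[1, 4], [2, 5], [3, 6]].
  const std::vector<float> expected = {1, 4, 2, 5, 3, 6};
  assert(output == expected);
  return 0;
}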