ONE - On-device Neural Engine
Loading...
Searching...
No Matches
SpaceToDepth.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
18#include "OMStatus.h"
20
21#include "core/OMRuntimeShape.h"
22#include "PALSpaceToDepth.h"
23
24using namespace onert_micro;
25using namespace onert_micro::execute;
namespace
{

// Operand positions of the SPACE_TO_DEPTH kernel inside the circle operator:
// a single input tensor and a single output tensor.
constexpr uint32_t inputTensorIdx = 0;
constexpr uint32_t outputTensorIdx = 0;

} // namespace
33OMStatus onert_micro::execute::execute_kernel_CircleSpaceToDepth(
34 const onert_micro::execute::OMExecuteArgs &execute_args)
35{
36 core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
37 core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
38 uint16_t op_index = execute_args.kernel_index;
39
40 const circle::Tensor *input;
41 const circle::Tensor *output;
42
43 uint8_t *input_data;
44 uint8_t *output_data;
45
46 // Read kernel
47 execute::OMRuntimeKernel runtime_kernel;
48 OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
49 if (status != Ok)
50 return status;
51
52 input = runtime_kernel.inputs[inputTensorIdx];
53 output = runtime_kernel.outputs[outputTensorIdx];
54
55 core::OMRuntimeShape input_shape(input);
57
58 assert(input != nullptr);
59 assert(output != nullptr);
60
61 status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
62 if (status != Ok)
63 return status;
64
65 input_data = runtime_kernel.inputs_data[inputTensorIdx];
67 const auto *options = runtime_kernel.first_operator->builtin_options_as_SpaceToDepthOptions();
68 const int32_t block_size = options->block_size();
69 switch (input->type())
70 {
71#ifndef DIS_FLOAT
72 case circle::TensorType_FLOAT32:
73 {
74 status =
75 pal::SpaceToDepth<float>(block_size, input_shape, reinterpret_cast<float *>(input_data),
76 output_shape, reinterpret_cast<float *>(output_data));
77 }
78 break;
79#endif // DIS_FLOAT
80#ifndef DIS_QUANT
81 case circle::TensorType_INT8:
82 {
83 status =
84 pal::SpaceToDepth<int8_t>(block_size, input_shape, reinterpret_cast<int8_t *>(input_data),
85 output_shape, reinterpret_cast<int8_t *>(output_data));
86 }
87 break;
88#endif // DIS_QUANT
89 default:
90 {
91 status = UnsupportedType;
92 assert(false && "Unsupported type.");
93 }
94 }
95
96 return status;
97}
uint8_t * outputs_data[maxOutputSize]
const circle::Operator * first_operator
OMStatus getDataFromStorage(uint16_t op_index, core::OMRuntimeStorage &storage, core::OMRuntimeContext &context)
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
const luci_interpreter::RuntimeShape output_shape
constexpr uint32_t outputTensorIdx
list input_data
Definition infer.py:29
@ UnsupportedType
Definition OMStatus.h:26
core::OMRuntimeContext & runtime_context
core::OMRuntimeStorage & runtime_storage