ONE - On-device Neural Engine
Loading...
Searching...
No Matches
Fill.cpp
Go to the documentation of this file.
1/*
2 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "OMStatus.h"

#include "core/OMKernelData.h"
#include "core/OMUtils.h"

#include "execute/OMRuntimeKernel.h"

#include "PALFill.h"

using namespace onert_micro;
using namespace onert_micro::core;
using namespace onert_micro::execute;
namespace
{

// Operand positions inside the CircleFill kernel's input/output lists:
// input 1 holds the scalar fill value (input 0, the dims tensor, is unused
// here since dynamic shapes are not supported); output 0 is the tensor to fill.
constexpr uint32_t valueTensorIdx = 1;
constexpr uint32_t outputTensorIdx = 0;

} // namespace
37
38// NOTE: doesn't currently support dynamic shapes
39namespace onert_micro
40{
41namespace execute
42{
43
45{
46 core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
47 core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
48 uint16_t op_index = execute_args.kernel_index;
49
50 const circle::Tensor *value;
51 const circle::Tensor *output;
52
53 uint8_t *value_data;
54 uint8_t *output_data;
55
56 // Read kernel
57 {
58 execute::OMRuntimeKernel runtime_kernel;
59 OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
60 if (status != Ok)
61 return status;
62
63 value = runtime_kernel.inputs[valueTensorIdx];
64 output = runtime_kernel.outputs[outputTensorIdx];
65 assert(value != nullptr);
66 assert(output != nullptr);
67
68 status = runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);
69 if (status != Ok)
70 return status;
71
72 value_data = runtime_kernel.inputs_data[valueTensorIdx];
73 output_data = runtime_kernel.outputs_data[outputTensorIdx];
74 assert(value_data != nullptr);
75 assert(output_data != nullptr);
76 }
77
78 OMStatus status = Ok;
79
80 assert(OMRuntimeShape(value).flatSize() == 1);
82
83 switch (output->type())
84 {
85#ifndef DIS_FLOAT
86 case circle::TensorType_FLOAT32:
87 {
88 status = pal::Fill(core::utils::castInputData<float>(value_data), output_shape,
89 core::utils::castOutputData<float>(output_data));
90 }
91 break;
92#endif // DIS_FLOAT
93 case circle::TensorType_INT32:
94 {
95 status = pal::Fill(core::utils::castInputData<int32_t>(value_data), output_shape,
96 core::utils::castOutputData<int32_t>(output_data));
97 }
98 break;
99 default:
100 {
101 status = UnsupportedActivation;
102 assert(false && "Unsupported type.");
103 break;
104 }
105 }
106
107 return status;
108}
109
110} // namespace execute
111} // namespace onert_micro
uint8_t * outputs_data[maxOutputSize]
OMStatus getDataFromStorage(uint16_t op_index, core::OMRuntimeStorage &storage, core::OMRuntimeContext &context)
OMStatus readKernel(uint16_t op_index, core::OMRuntimeContext &runtime_context)
const circle::Tensor * outputs[maxOutputSize]
const circle::Tensor * inputs[maxInputSize]
const luci_interpreter::RuntimeShape output_shape
constexpr uint32_t outputTensorIdx
OMStatus Fill(const T *input_data, const core::OMRuntimeShape &output_shape, T *output_data)
Definition PALFill.h:32
OMStatus execute_kernel_CircleFill(const OMExecuteArgs &execute_args)
Definition Fill.cpp:44
@ UnsupportedActivation
Definition OMStatus.h:28
core::OMRuntimeContext & runtime_context
core::OMRuntimeStorage & runtime_storage