ONE - On-device Neural Engine
ReduceProd.cpp

/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "execute/OMUtils.h"
#include "execute/OMRuntimeKernel.h" // assumed path for execute::OMRuntimeKernel used below
#include "OMStatus.h"
#include "core/OMUtils.h"

#include "core/OMRuntimeShape.h"
#include "PALReduceCommon.h"

#include <cassert>
using namespace onert_micro;
using namespace onert_micro::execute;

namespace
{

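// Operator slots in the circle model: input 0 is the tensor to reduce,
// input 1 holds the reduction axes, output 0 receives the result.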
constexpr uint32_t input1TensorIdx = 0;
constexpr uint32_t input2TensorIdx = 1;
constexpr uint32_t outputTensorIdx = 0;

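// Wraps the generic PAL reduction: an initial value of 1 combined with a
// multiplication accumulator turns it into a product over the given axes.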
template <typename T>
bool reduceProdGeneric(core::OMRuntimeShape &input_shape, const T *input_data,
                       core::OMRuntimeShape &axis_shape, const int *axis_data,
                       core::OMRuntimeShape &output_shape, T *output_data, bool keep_dims)
{
  return onert_micro::execute::pal::ReduceGeneric<T>(
    input_data, input_shape.dimsData(), input_shape.dimensionsCount(), output_data, axis_data,
    axis_shape.dimensionsCount(),
    /*init_value=*/T(1), output_shape.flatSize(),
    [](const T current, const T in) -> T { return in * current; });
}

} // namespace

namespace onert_micro
{
namespace execute
{

OMStatus execute_kernel_CircleReduceProd(const OMExecuteArgs &execute_args)
{
  core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
  core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
  uint16_t op_index = execute_args.kernel_index;

  const circle::Tensor *input;
  const circle::Tensor *axis;
  const circle::Tensor *output;

  uint8_t *input_data;
  uint8_t *axis_data;
  uint8_t *output_data;

  uint16_t input_index = 0;
  uint16_t axis_index = 0;

  const circle::ReducerOptions *options;
  // Read kernel
  {
    execute::OMRuntimeKernel runtime_kernel;
    runtime_kernel.readKernel(op_index, runtime_context);

    input = runtime_kernel.inputs[input1TensorIdx];
    axis = runtime_kernel.inputs[input2TensorIdx];
    output = runtime_kernel.outputs[outputTensorIdx];
    assert(input != nullptr);
    assert(axis != nullptr);
    assert(output != nullptr);

    runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);

    input_data = runtime_kernel.inputs_data[input1TensorIdx];
    axis_data = runtime_kernel.inputs_data[input2TensorIdx];
    output_data = runtime_kernel.outputs_data[outputTensorIdx];
    assert(input_data != nullptr);
    assert(axis_data != nullptr);
    assert(output_data != nullptr);

    options = runtime_kernel.first_operator->builtin_options_as_ReducerOptions();

    input_index = runtime_kernel.inputs_index[input1TensorIdx];
    axis_index = runtime_kernel.inputs_index[input2TensorIdx];
  }

  core::OMRuntimeShape input_shape(input);
  core::OMRuntimeShape axis_shape(axis);
  core::OMRuntimeShape output_shape(output);

  bool is_ok = false;

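  // Dispatch on the input element type; float support can be compiled out
  // by defining DIS_FLOAT.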
  switch (input->type())
  {
#ifndef DIS_FLOAT
    case circle::TensorType_FLOAT32:
      is_ok = reduceProdGeneric<float>(
        input_shape, core::utils::castInputData<float>(input_data), axis_shape,
        core::utils::castInputData<int>(axis_data), output_shape,
        core::utils::castOutputData<float>(output_data), options->keep_dims());
      break;
#endif // DIS_FLOAT
    case circle::TensorType_INT32:
      is_ok = reduceProdGeneric<int32_t>(
        input_shape, core::utils::castInputData<int32_t>(input_data), axis_shape,
        core::utils::castInputData<int>(axis_data), output_shape,
        core::utils::castOutputData<int32_t>(output_data), options->keep_dims());
      break;
    case circle::TensorType_INT64:
      is_ok = reduceProdGeneric<int64_t>(
        input_shape, core::utils::castInputData<int64_t>(input_data), axis_shape,
        core::utils::castInputData<int>(axis_data), output_shape,
        core::utils::castOutputData<int64_t>(output_data), options->keep_dims());
      break;
    default:
      assert(false && "Unsupported type");
      return UnsupportedType;
  }

  if (!is_ok)
    return UnknownError;

  return Ok;
}

} // namespace execute
} // namespace onert_micro
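
For reference, a minimal standalone sketch of the semantics this kernel delegates to pal::ReduceGeneric: each output element is the product of the input elements along the reduced axis, folded from an initial value of 1 with the same multiplication accumulator the lambda above supplies. The sketch uses made-up data and does not touch the onert-micro PAL helpers.

#include <array>
#include <cstdio>

int main()
{
  // 2x3 input; reduce over axis 1 (the inner dimension), keep_dims = false.
  const std::array<std::array<float, 3>, 2> input = {{{2.f, 3.f, 4.f}, {1.f, 5.f, 0.5f}}};

  std::array<float, 2> output{};
  int out_idx = 0;
  for (const auto &row : input)
  {
    float acc = 1.f; // init_value of a product reduction
    for (float v : row)
      acc = v * acc; // same fold the kernel's lambda performs
    output[out_idx++] = acc;
  }

  std::printf("%g %g\n", output[0], output[1]); // prints: 24 2.5
  return 0;
}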