ONE - On-device Neural Engine
BatchMatMul.cpp
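Execution kernel for the Circle BatchMatMul operator in onert-micro: it reads the operator's tensors and their data from the runtime context and storage, then dispatches to the platform abstraction layer (PAL) implementation matching the input type.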
/*
 * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "OMStatus.h"

#include "core/OMUtils.h"
#include "core/OMRuntimeShape.h"

#include "execute/OMUtils.h"
#include "execute/OMRuntimeKernel.h"

#include "PALBatchMatMul.h"

using namespace onert_micro;
using namespace onert_micro::execute;

namespace
{

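// Positional indices of this operator's tensors within its Circle input/output lists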
constexpr uint32_t input1TensorIdx = 0;
constexpr uint32_t input2TensorIdx = 1;
constexpr uint32_t outputTensorIdx = 0;

} // namespace

// NOTE: doesn't currently support dynamic shapes
namespace onert_micro
{
namespace execute
{

OMStatus execute_kernel_CircleBatchMatMul(const OMExecuteArgs &execute_args)
{
  core::OMRuntimeContext &runtime_context = execute_args.runtime_context;
  core::OMRuntimeStorage &runtime_storage = execute_args.runtime_storage;
  uint16_t op_index = execute_args.kernel_index;

  const circle::Tensor *input1;
  const circle::Tensor *input2;
  const circle::Tensor *output;

  uint8_t *input1_data;
  uint8_t *input2_data;
  uint8_t *output_data;

  const circle::BatchMatMulOptions *options;
  // Read kernel
  {
    execute::OMRuntimeKernel runtime_kernel;
    runtime_kernel.readKernel(op_index, runtime_context);

    input1 = runtime_kernel.inputs[input1TensorIdx];
    input2 = runtime_kernel.inputs[input2TensorIdx];
    output = runtime_kernel.outputs[outputTensorIdx];
    assert(input1 != nullptr);
    assert(input2 != nullptr);
    assert(output != nullptr);

    runtime_kernel.getDataFromStorage(op_index, runtime_storage, runtime_context);

    input1_data = runtime_kernel.inputs_data[input1TensorIdx];
    input2_data = runtime_kernel.inputs_data[input2TensorIdx];
    output_data = runtime_kernel.outputs_data[outputTensorIdx];
    assert(input1_data != nullptr);
    assert(input2_data != nullptr);
    assert(output_data != nullptr);

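    // Operator options from the model; for BatchMatMul these may carry adjoint (transpose) flags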
    options = runtime_kernel.first_operator->builtin_options_as_BatchMatMulOptions();
  }

  OMStatus status;

  core::OMRuntimeShape input1_shape(input1);
  core::OMRuntimeShape input2_shape(input2);
  core::OMRuntimeShape output_shape(output);

  switch (input1->type())
  {
#ifndef DIS_FLOAT
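    // Float support can be compiled out by defining DIS_FLOAT (e.g. to reduce binary size)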
    case circle::TensorType_FLOAT32:
    {
      status = onert_micro::execute::pal::BatchMatMul<float, float>(
        input1_shape, core::utils::castInputData<float>(input1_data), input2_shape,
        core::utils::castInputData<float>(input2_data), output_shape,
        core::utils::castOutputData<float>(output_data));
    }
    break;
#endif // DIS_FLOAT
    default:
    {
      status = UnsupportedType;
      assert(false && "Unsupported type.");
    }
  }

  return status;
}

} // namespace execute
} // namespace onert_micro
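For reference, the switch above delegates all arithmetic to the PAL routine declared in PALBatchMatMul.h. The following stand-alone sketch shows the computation such a routine performs in the simplest case; it assumes plain 3-D tensors with equal batch counts and no adjoint flags, and batchMatMulRef is a hypothetical name, not the actual PAL entry point (which takes OMRuntimeShape arguments and handles shape details).

// Hypothetical reference sketch -- not the actual PAL implementation.
// Computes out[b] = lhs[b] x rhs[b] for lhs[b, m, k], rhs[b, k, n], out[b, m, n].
#include <cstdint>

void batchMatMulRef(const float *lhs, const float *rhs, float *out, int32_t batches,
                    int32_t m, int32_t k, int32_t n)
{
  for (int32_t b = 0; b < batches; ++b)
  {
    const float *lhs_mat = lhs + b * m * k; // [m, k] slice for this batch
    const float *rhs_mat = rhs + b * k * n; // [k, n] slice for this batch
    float *out_mat = out + b * m * n;       // [m, n] slice for this batch
    for (int32_t i = 0; i < m; ++i)
    {
      for (int32_t j = 0; j < n; ++j)
      {
        float acc = 0.f;
        for (int32_t p = 0; p < k; ++p)
          acc += lhs_mat[i * k + p] * rhs_mat[p * n + j];
        out_mat[i * n + j] = acc;
      }
    }
  }
}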