ONE - On-device Neural Engine
Shape.cpp
/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "OMStatus.h"

#include "core/OMUtils.h"
#include "import/OMKernelConfigureBuilder.h"
#include "execute/OMRuntimeKernel.h"

using namespace onert_micro;
using namespace onert_micro::core;

namespace
{

constexpr uint32_t inputTensorIdx = 0;
constexpr uint32_t outputTensorIdx = 0;

} // namespace

namespace onert_micro
{
namespace import
{

OMStatus configure_kernel_CircleShape(const OMConfigureArgs &config_args)
{
  OMRuntimeContext &runtime_context = config_args.runtime_context;
  uint16_t op_index = config_args.kernel_index;

  execute::OMRuntimeKernel runtime_kernel;

  // Load the operator's input/output tensor handles from the circle model.
  OMStatus status = runtime_kernel.readKernel(op_index, runtime_context);
  if (status != Ok)
    return status;

  const circle::Tensor *output = runtime_kernel.outputs[outputTensorIdx];

  assert(output != nullptr);

  // The Shape kernel produces the input's dimensions as INT32 values,
  // so reject any model that declares a different output type.
  status = utils::checkCondition(output->type() == circle::TensorType_INT32);

  return status;
}

} // namespace import
} // namespace onert_micro
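For context, this configure step only validates the operator's static contract before execution: the output tensor of the Shape operator must be declared as INT32. The standalone sketch below illustrates the runtime behaviour that this check protects; it is illustrative only, does not use the onert-micro API, and the helper name circle_shape_reference is made up for this example.

// Illustrative sketch only (not onert-micro code): a Circle/TFLite Shape
// operator emits a 1-D tensor whose i-th element is the size of the input's
// i-th dimension, and this kernel materialises those sizes as 32-bit integers.
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical reference helper: maps an input's dimensions to the values the
// Shape operator would write into its INT32 output tensor.
std::vector<int32_t> circle_shape_reference(const std::vector<int32_t> &input_dims)
{
  return std::vector<int32_t>(input_dims.begin(), input_dims.end());
}

int main()
{
  const std::vector<int32_t> input_dims = {1, 224, 224, 3}; // e.g. an NHWC image tensor
  for (int32_t d : circle_shape_reference(input_dims))
    std::cout << d << ' '; // prints: 1 224 224 3
  std::cout << '\n';
  return 0;
}

Because the dimensions themselves are known once the model is loaded, the only thing left to verify at configure time is the declared output type, which is what the single checkCondition call above enforces.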