ONE - On-device Neural Engine
Loading...
Searching...
No Matches
nnfw_api_wrapper.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "nnfw_api_wrapper.h"

#include <cstdlib>  // exit
#include <cstring>  // strcmp
#include <iostream>
20
22{
23 switch (status)
24 {
26 break;
28 std::cout << "[ERROR]\tNNFW_STATUS_ERROR\n";
29 exit(1);
31 std::cout << "[ERROR]\tNNFW_STATUS_UNEXPECTED_NULL\n";
32 exit(1);
34 std::cout << "[ERROR]\tNNFW_STATUS_INVALID_STATE\n";
35 exit(1);
37 std::cout << "[ERROR]\tNNFW_STATUS_OUT_OF_MEMORY\n";
38 exit(1);
40 std::cout << "[ERROR]\tNNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE\n";
41 exit(1);
43 std::cout << "[ERROR]\tNNFW_STATUS_DEPRECATED_API\n";
44 exit(1);
45 }
46}
47
48NNFW_LAYOUT getLayout(const char *layout)
49{
50 if (!strcmp(layout, "NCHW"))
51 {
53 }
54 else if (!strcmp(layout, "NHWC"))
55 {
57 }
58 else if (!strcmp(layout, "NONE"))
59 {
61 }
62 else
63 {
64 std::cout << "[ERROR]\tLAYOUT_TYPE\n";
65 exit(1);
66 }
67}
68
69NNFW_TYPE getType(const char *type)
70{
71 if (!strcmp(type, "float32"))
72 {
74 }
75 else if (!strcmp(type, "int32"))
76 {
78 }
79 else if (!strcmp(type, "uint8"))
80 {
81 return NNFW_TYPE::NNFW_TYPE_TENSOR_UINT8;
82 // return NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM;
83 }
84 else if (!strcmp(type, "bool"))
85 {
86 return NNFW_TYPE::NNFW_TYPE_TENSOR_BOOL;
87 }
88 else if (!strcmp(type, "int64"))
89 {
90 return NNFW_TYPE::NNFW_TYPE_TENSOR_INT64;
91 }
92 else if (!strcmp(type, "int8"))
93 {
94 return NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED;
95 }
96 else if (!strcmp(type, "int16"))
97 {
98 return NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED;
99 }
100 else
101 {
102 std::cout << "[ERROR] String to NNFW_TYPE Failure\n";
103 exit(1);
104 }
105}
106
107const char *getStringType(NNFW_TYPE type)
108{
109 switch (type)
110 {
112 return "float32";
114 return "int32";
115 case NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM:
116 case NNFW_TYPE::NNFW_TYPE_TENSOR_UINT8:
117 return "uint8";
118 case NNFW_TYPE::NNFW_TYPE_TENSOR_BOOL:
119 return "bool";
120 case NNFW_TYPE::NNFW_TYPE_TENSOR_INT64:
121 return "int64";
122 case NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
123 return "int8";
124 case NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED:
125 return "int16";
126 default:
127 std::cout << "[ERROR] NNFW_TYPE to String Failure\n";
128 exit(1);
129 }
130}
131
132uint64_t num_elems(const nnfw_tensorinfo *tensor_info)
133{
134 uint64_t n = 1;
135 for (int32_t i = 0; i < tensor_info->rank; ++i)
136 {
137 n *= tensor_info->dims[i];
138 }
139 return n;
140}
141
142py::list get_dims(const tensorinfo &tensor_info)
143{
144 py::list dims_list;
145 for (int32_t i = 0; i < tensor_info.rank; ++i)
146 {
147 dims_list.append(tensor_info.dims[i]);
148 }
149 return dims_list;
150}
151
152void set_dims(tensorinfo &tensor_info, const py::list &array)
153{
154 tensor_info.rank = py::len(array);
155 for (int32_t i = 0; i < tensor_info.rank; ++i)
156 {
157 tensor_info.dims[i] = py::cast<int32_t>(array[i]);
158 }
159}
160
161NNFW_SESSION::NNFW_SESSION(const char *package_file_path, const char *backends)
162{
163 this->session = nullptr;
164 ensure_status(nnfw_create_session(&(this->session)));
165 ensure_status(nnfw_load_model_from_file(this->session, package_file_path));
167 ensure_status(nnfw_prepare(this->session));
168}
170{
171 if (session)
172 {
174 }
175}
176
178{
179 ensure_status(nnfw_close_session(this->session));
180 this->session = nullptr;
181}
182void NNFW_SESSION::set_input_tensorinfo(uint32_t index, const tensorinfo *tensor_info)
183{
185 ti.dtype = getType(tensor_info->dtype);
186 ti.rank = tensor_info->rank;
187 for (int i = 0; i < NNFW_MAX_RANK; i++)
188 {
189 ti.dims[i] = tensor_info->dims[i];
190 }
191 ensure_status(nnfw_set_input_tensorinfo(session, index, &ti));
192}
197{
198 uint32_t number;
199 NNFW_STATUS status = nnfw_input_size(session, &number);
200 ensure_status(status);
201 return number;
202}
204{
205 uint32_t number;
206 NNFW_STATUS status = nnfw_output_size(session, &number);
207 ensure_status(status);
208 return number;
209}
210void NNFW_SESSION::set_input_layout(uint32_t index, const char *layout)
211{
212 NNFW_LAYOUT nnfw_layout = getLayout(layout);
213 ensure_status(nnfw_set_input_layout(session, index, nnfw_layout));
214}
215void NNFW_SESSION::set_output_layout(uint32_t index, const char *layout)
216{
217 NNFW_LAYOUT nnfw_layout = getLayout(layout);
218 ensure_status(nnfw_set_output_layout(session, index, nnfw_layout));
219}
221{
222 nnfw_tensorinfo tensor_info = nnfw_tensorinfo();
223 ensure_status(nnfw_input_tensorinfo(session, index, &tensor_info));
224 tensorinfo ti;
225 ti.dtype = getStringType(tensor_info.dtype);
226 ti.rank = tensor_info.rank;
227 for (int i = 0; i < NNFW_MAX_RANK; i++)
228 {
229 ti.dims[i] = tensor_info.dims[i];
230 }
231 return ti;
232}
234{
235 nnfw_tensorinfo tensor_info = nnfw_tensorinfo();
236 ensure_status(nnfw_output_tensorinfo(session, index, &tensor_info));
237 tensorinfo ti;
238 ti.dtype = getStringType(tensor_info.dtype);
239 ti.rank = tensor_info.rank;
240 for (int i = 0; i < NNFW_MAX_RANK; i++)
241 {
242 ti.dims[i] = tensor_info.dims[i];
243 }
244 return ti;
245}
NNFW_SESSION(const char *package_file_path, const char *backends)
tensorinfo output_tensorinfo(uint32_t index)
uint32_t output_size()
void set_input_layout(uint32_t index, const char *layout)
void set_output_layout(uint32_t index, const char *layout)
tensorinfo input_tensorinfo(uint32_t index)
uint32_t input_size()
void set_input_tensorinfo(uint32_t index, const tensorinfo *tensor_info)
NNFW_STATUS nnfw_await(nnfw_session *session)
Wait for asynchronous run to finish.
Definition nnfw_api.cc:90
NNFW_STATUS nnfw_run_async(nnfw_session *session)
Run inference asynchronously.
Definition nnfw_api.cc:84
NNFW_STATUS nnfw_output_tensorinfo(nnfw_session *session, uint32_t index, nnfw_tensorinfo *tensor_info)
Get i-th output tensor info.
Definition nnfw_api.cc:141
NNFW_STATUS nnfw_set_available_backends(nnfw_session *session, const char *backends)
Set available backends.
Definition nnfw_api.cc:167
NNFW_STATUS nnfw_input_tensorinfo(nnfw_session *session, uint32_t index, nnfw_tensorinfo *tensor_info)
Get i-th input tensor info.
Definition nnfw_api.cc:134
NNFW_STATUS nnfw_output_size(nnfw_session *session, uint32_t *number)
Get the number of outputs.
Definition nnfw_api.cc:116
NNFW_STATUS nnfw_input_size(nnfw_session *session, uint32_t *number)
Get the number of inputs.
Definition nnfw_api.cc:110
NNFW_STATUS nnfw_set_input_tensorinfo(nnfw_session *session, uint32_t index, const nnfw_tensorinfo *tensor_info)
Set input model's tensor info for resizing.
Definition nnfw_api.cc:160
NNFW_STATUS nnfw_run(nnfw_session *session)
Run inference.
Definition nnfw_api.cc:78
NNFW_STATUS nnfw_prepare(nnfw_session *session)
Prepare session to be ready for inference.
Definition nnfw_api.cc:72
NNFW_STATUS nnfw_set_output_layout(nnfw_session *session, uint32_t index, NNFW_LAYOUT layout)
Set the layout of an output.
Definition nnfw_api.cc:128
NNFW_LAYOUT
Data format of a tensor.
Definition nnfw.h:134
@ NNFW_LAYOUT_CHANNELS_LAST
Definition nnfw.h:141
@ NNFW_LAYOUT_CHANNELS_FIRST
Definition nnfw.h:146
@ NNFW_LAYOUT_NONE
Definition nnfw.h:136
NNFW_STATUS nnfw_set_input_layout(nnfw_session *session, uint32_t index, NNFW_LAYOUT layout)
Set the layout of an input.
Definition nnfw_api.cc:122
NNFW_LAYOUT getLayout(const char *layout)
void ensure_status(NNFW_STATUS status)
Handle errors with NNFW_STATUS in API functions.
py::list get_dims(const tensorinfo &tensor_info)
Get nnfw_tensorinfo->dims.
uint64_t num_elems(const nnfw_tensorinfo *tensor_info)
Get the total number of elements in nnfw_tensorinfo->dims.
void set_dims(tensorinfo &tensor_info, const py::list &array)
Set nnfw_tensorinfo->dims.
const char * getStringType(NNFW_TYPE type)
NNFW_TYPE getType(const char *type)
NNFW_TYPE getType(const char *type="")
void ensure_status(NNFW_STATUS status)
Handle errors with NNFW_STATUS in API functions.
NNFW_LAYOUT getLayout(const char *layout="")
const char * getStringType(NNFW_TYPE type)
NNFW_STATUS nnfw_create_session(nnfw_session **session)
Create a new session instance.
NNFW_STATUS
Result values returned from a call to an API function.
Definition onert-micro.h:86
@ NNFW_STATUS_INVALID_STATE
Definition onert-micro.h:97
@ NNFW_STATUS_UNEXPECTED_NULL
Definition onert-micro.h:95
@ NNFW_STATUS_NO_ERROR
Definition onert-micro.h:88
@ NNFW_STATUS_DEPRECATED_API
@ NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE
@ NNFW_STATUS_ERROR
Definition onert-micro.h:93
@ NNFW_STATUS_OUT_OF_MEMORY
Definition onert-micro.h:99
NNFW_STATUS nnfw_close_session(nnfw_session *session)
Close a session instance.
Definition nnfw_api.cc:60
NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *package_file_path)
Load model from nnpackage file or directory.
NNFW_TYPE
Definition onert-micro.h:75
@ NNFW_TYPE_TENSOR_INT32
Definition onert-micro.h:79
@ NNFW_TYPE_TENSOR_FLOAT32
Definition onert-micro.h:77
#define NNFW_MAX_RANK
Maximum rank expressible with nnfw.
tensor info describes the type and shape of tensors
NNFW_TYPE dtype
int32_t dims[NNFW_MAX_RANK]
tensor info describes the type and shape of tensors
int32_t dims[NNFW_MAX_RANK]
const char * dtype