ONE - On-device Neural Engine
Loading...
Searching...
No Matches
nnfw_api_wrapper.cc
Go to the documentation of this file.
1/*
2 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "nnfw_api_wrapper.h"
#include "nnfw_exceptions.h"

#include <cstring>  // std::strcmp, std::memcpy
#include <iostream>
#include <string>
#include <vector>
21
22namespace onert::api::python
23{
24
25namespace py = pybind11;
26
28{
29 switch (status)
30 {
32 return;
34 throw NnfwError("NNFW_STATUS_ERROR");
36 throw NnfwUnexpectedNullError("NNFW_STATUS_UNEXPECTED_NULL");
38 throw NnfwInvalidStateError("NNFW_STATUS_INVALID_STATE");
40 throw NnfwOutOfMemoryError("NNFW_STATUS_OUT_OF_MEMORY");
42 throw NnfwInsufficientOutputError("NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE");
44 throw NnfwDeprecatedApiError("NNFW_STATUS_DEPRECATED_API");
45 default:
46 throw NnfwError("NNFW_UNKNOWN_ERROR");
47 }
48}
49
50NNFW_LAYOUT getLayout(const char *layout)
51{
52 if (std::strcmp(layout, "NCHW") == 0)
54 else if (std::strcmp(layout, "NHWC") == 0)
56 else if (std::strcmp(layout, "NONE") == 0)
58 else
59 throw NnfwError(std::string("Unknown layout type: '") + layout + "'");
60}
61
62NNFW_TYPE getType(const char *type)
63{
64 if (std::strcmp(type, "float32") == 0)
66 else if (std::strcmp(type, "int32") == 0)
68 else if (std::strcmp(type, "bool") == 0)
69 return NNFW_TYPE::NNFW_TYPE_TENSOR_UINT8;
70 else if (std::strcmp(type, "bool") == 0)
71 return NNFW_TYPE::NNFW_TYPE_TENSOR_BOOL;
72 else if (std::strcmp(type, "int64") == 0)
73 return NNFW_TYPE::NNFW_TYPE_TENSOR_INT64;
74 else if (std::strcmp(type, "int8") == 0)
75 return NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED;
76 else if (std::strcmp(type, "int16") == 0)
77 return NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED;
78 else
79 throw NnfwError(std::string("Cannot convert string to NNFW_TYPE: '") + type + "'");
80}
81
82const char *getStringType(NNFW_TYPE type)
83{
84 switch (type)
85 {
87 return "float32";
89 return "int32";
90 case NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM:
91 case NNFW_TYPE::NNFW_TYPE_TENSOR_UINT8:
92 return "uint8";
93 case NNFW_TYPE::NNFW_TYPE_TENSOR_BOOL:
94 return "bool";
95 case NNFW_TYPE::NNFW_TYPE_TENSOR_INT64:
96 return "int64";
97 case NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
98 return "int8";
99 case NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED:
100 return "int16";
101 default:
102 throw NnfwError(std::string("Cannot convert NNFW_TYPE enum to string (value=") +
103 std::to_string(static_cast<int>(type)) + ")");
104 }
105}
106
107uint64_t num_elems(const nnfw_tensorinfo *tensor_info)
108{
109 uint64_t n = 1;
110 for (int32_t i = 0; i < tensor_info->rank; ++i)
111 {
112 n *= tensor_info->dims[i];
113 }
114 return n;
115}
116
117py::list get_dims(const tensorinfo &tensor_info)
118{
119 py::list dims_list;
120 for (int32_t i = 0; i < tensor_info.rank; ++i)
121 {
122 dims_list.append(tensor_info.dims[i]);
123 }
124 return dims_list;
125}
126
127void set_dims(tensorinfo &tensor_info, const py::list &array)
128{
129 tensor_info.rank = py::len(array);
130 for (int32_t i = 0; i < tensor_info.rank; ++i)
131 {
132 tensor_info.dims[i] = py::cast<int32_t>(array[i]);
133 }
134}
135
136NNFW_SESSION::NNFW_SESSION(const char *package_file_path, const char *backends)
137{
138 this->session = nullptr;
139 ensure_status(nnfw_create_session(&(this->session)));
140 ensure_status(nnfw_load_model_from_file(this->session, package_file_path));
142}
144{
145 if (session)
146 {
148 }
149}
150
152{
153 ensure_status(nnfw_close_session(this->session));
154 this->session = nullptr;
155}
156void NNFW_SESSION::set_input_tensorinfo(uint32_t index, const tensorinfo *tensor_info)
157{
159 ti.dtype = getType(tensor_info->dtype);
160 ti.rank = tensor_info->rank;
161 for (int i = 0; i < NNFW_MAX_RANK; i++)
162 {
163 ti.dims[i] = tensor_info->dims[i];
164 }
165 ensure_status(nnfw_set_input_tensorinfo(session, index, &ti));
166}
172{
173 uint32_t number;
174 NNFW_STATUS status = nnfw_input_size(session, &number);
175 ensure_status(status);
176 return number;
177}
179{
180 uint32_t number;
181 NNFW_STATUS status = nnfw_output_size(session, &number);
182 ensure_status(status);
183 return number;
184}
185void NNFW_SESSION::set_input_layout(uint32_t index, const char *layout)
186{
187 NNFW_LAYOUT nnfw_layout = getLayout(layout);
188 ensure_status(nnfw_set_input_layout(session, index, nnfw_layout));
189}
191{
192 nnfw_tensorinfo tensor_info = nnfw_tensorinfo();
193 ensure_status(nnfw_input_tensorinfo(session, index, &tensor_info));
194 tensorinfo ti;
195 ti.dtype = getStringType(tensor_info.dtype);
196 ti.rank = tensor_info.rank;
197 for (int i = 0; i < NNFW_MAX_RANK; i++)
198 {
199 ti.dims[i] = tensor_info.dims[i];
200 }
201 return ti;
202}
204{
205 nnfw_tensorinfo tensor_info = nnfw_tensorinfo();
206 ensure_status(nnfw_output_tensorinfo(session, index, &tensor_info));
207 tensorinfo ti;
208 ti.dtype = getStringType(tensor_info.dtype);
209 ti.rank = tensor_info.rank;
210 for (int i = 0; i < NNFW_MAX_RANK; i++)
211 {
212 ti.dims[i] = tensor_info.dims[i];
213 }
214 return ti;
215}
216
218// Internal APIs
220py::array NNFW_SESSION::get_output(uint32_t index)
221{
222 // First call into the C API
223 nnfw_tensorinfo out_info = {};
224 const void *out_buffer = nullptr;
225 ensure_status(nnfw_get_output(session, index, &out_info, &out_buffer));
226
227 // Convert nnfw_tensorinfo to our python-visible struct
228 size_t num_elements = 1;
229 std::vector<ssize_t> shape;
230 shape.reserve(out_info.rank);
231 for (int i = 0; i < out_info.rank; ++i)
232 {
233 shape.push_back(static_cast<ssize_t>(out_info.dims[i]));
234 num_elements *= static_cast<size_t>(out_info.dims[i]);
235 }
236
237 // Wrap the raw buffer in a numpy array;
238 auto np = py::module_::import("numpy");
239 py::dtype dt = np.attr("dtype")(py::str(getStringType(out_info.dtype))).cast<py::dtype>();
240 size_t itemsize = dt.attr("itemsize").cast<size_t>();
241
242 py::array arr(dt, shape);
243 std::memcpy(arr.mutable_data(), out_buffer, num_elements * itemsize);
244 arr.attr("flags").attr("writeable") = false;
245
246 return arr;
247}
248
250// Experimental APIs for inference
256
258// Experimental APIs for training
261{
262 nnfw_train_info train_info = nnfw_train_info();
263 ensure_status(nnfw_train_get_traininfo(session, &train_info));
264 return train_info;
265}
266
271
273
274void NNFW_SESSION::train(bool update_weights)
275{
276 ensure_status(nnfw_train(session, update_weights));
277}
278
279float NNFW_SESSION::train_get_loss(uint32_t index)
280{
281 float loss = 0.f;
282 ensure_status(nnfw_train_get_loss(session, index, &loss));
283 return loss;
284}
285
286void NNFW_SESSION::train_export_circle(const py::str &path)
287{
288 const char *c_str_path = path.cast<std::string>().c_str();
289 ensure_status(nnfw_train_export_circle(session, c_str_path));
290}
291
293{
294 const char *c_str_path = path.cast<std::string>().c_str();
295 ensure_status(nnfw_train_import_checkpoint(session, c_str_path));
296}
297
299{
300 const char *c_str_path = path.cast<std::string>().c_str();
301 ensure_status(nnfw_train_export_checkpoint(session, c_str_path));
302}
303
304} // namespace onert::api::python
void train_export_circle(const py::str &path)
py::array get_output(uint32_t index)
tensorinfo input_tensorinfo(uint32_t index)
void train_import_checkpoint(const py::str &path)
void set_input_tensorinfo(uint32_t index, const tensorinfo *tensor_info)
void train(bool update_weights)
void set_input_layout(uint32_t index, const char *layout)
void train_set_traininfo(const nnfw_train_info *info)
void set_prepare_config(NNFW_PREPARE_CONFIG config)
tensorinfo output_tensorinfo(uint32_t index)
NNFW_SESSION(const char *package_file_path, const char *backends)
void train_export_checkpoint(const py::str &path)
volatile const char info[]
void set_dims(tensorinfo &tensor_info, const py::list &array)
Set nnfw_tensorinfo->dims.
const char * getStringType(NNFW_TYPE type)
NNFW_TYPE getType(const char *type="")
void ensure_status(NNFW_STATUS status)
Handle errors with NNFW_STATUS in API functions.
NNFW_LAYOUT getLayout(const char *layout="")
uint64_t num_elems(const nnfw_tensorinfo *tensor_info)
Get the total number of elements in nnfw_tensorinfo->dims.
py::list get_dims(const tensorinfo &tensor_info)
Get nnfw_tensorinfo->dims.
NNFW_STATUS nnfw_await(nnfw_session *session)
Wait for asynchronous run to finish.
Definition nnfw_api.cc:90
NNFW_STATUS nnfw_run_async(nnfw_session *session)
Run inference asynchronously.
Definition nnfw_api.cc:84
NNFW_STATUS nnfw_output_tensorinfo(nnfw_session *session, uint32_t index, nnfw_tensorinfo *tensor_info)
Get i-th output tensor info.
Definition nnfw_api.cc:153
NNFW_STATUS nnfw_set_available_backends(nnfw_session *session, const char *backends)
Set available backends.
Definition nnfw_api.cc:179
NNFW_STATUS nnfw_input_tensorinfo(nnfw_session *session, uint32_t index, nnfw_tensorinfo *tensor_info)
Get i-th input tensor info.
Definition nnfw_api.cc:146
NNFW_STATUS nnfw_output_size(nnfw_session *session, uint32_t *number)
Get the number of outputs.
Definition nnfw_api.cc:116
NNFW_STATUS nnfw_input_size(nnfw_session *session, uint32_t *number)
Get the number of inputs.
Definition nnfw_api.cc:110
NNFW_STATUS nnfw_set_input_tensorinfo(nnfw_session *session, uint32_t index, const nnfw_tensorinfo *tensor_info)
Set input model's tensor info for resizing.
Definition nnfw_api.cc:172
NNFW_STATUS nnfw_run(nnfw_session *session)
Run inference.
Definition nnfw_api.cc:78
NNFW_STATUS nnfw_prepare(nnfw_session *session)
Prepare session to be ready for inference.
Definition nnfw_api.cc:72
NNFW_LAYOUT
Data format of a tensor.
Definition nnfw.h:134
@ NNFW_LAYOUT_CHANNELS_LAST
Definition nnfw.h:141
@ NNFW_LAYOUT_CHANNELS_FIRST
Definition nnfw.h:146
@ NNFW_LAYOUT_NONE
Definition nnfw.h:136
NNFW_STATUS nnfw_set_input_layout(nnfw_session *session, uint32_t index, NNFW_LAYOUT layout)
Set the layout of an input.
Definition nnfw_api.cc:122
NNFW_STATUS nnfw_set_prepare_config(nnfw_session *session, NNFW_PREPARE_CONFIG key, const char *value)
Set prepare configuration.
Definition nnfw_api.cc:398
NNFW_STATUS nnfw_train_get_traininfo(nnfw_session *session, nnfw_train_info *info)
Get training information.
Definition nnfw_api.cc:262
NNFW_PREPARE_CONFIG
Configuration key for prepare (compile and schedule)
NNFW_STATUS nnfw_get_output(nnfw_session *session, uint32_t index, nnfw_tensorinfo *out_info, const void **out_buffer)
Python-binding-only API to retrieve a read-only output buffer and its tensor info.
NNFW_STATUS nnfw_create_session(nnfw_session **session)
Create a new session instance.
NNFW_STATUS
Result values returned from a call to an API function.
Definition onert-micro.h:86
@ NNFW_STATUS_INVALID_STATE
Definition onert-micro.h:97
@ NNFW_STATUS_UNEXPECTED_NULL
Definition onert-micro.h:95
@ NNFW_STATUS_NO_ERROR
Definition onert-micro.h:88
@ NNFW_STATUS_DEPRECATED_API
@ NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE
@ NNFW_STATUS_ERROR
Definition onert-micro.h:93
@ NNFW_STATUS_OUT_OF_MEMORY
Definition onert-micro.h:99
NNFW_STATUS nnfw_close_session(nnfw_session *session)
Close a session instance.
Definition nnfw_api.cc:60
NNFW_STATUS nnfw_train_get_loss(nnfw_session *session, uint32_t index, float *loss)
Get loss value for expected output.
NNFW_STATUS nnfw_train_export_checkpoint(nnfw_session *session, const char *path)
NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *package_file_path)
Load model from nnpackage file or directory.
NNFW_STATUS nnfw_train_prepare(nnfw_session *session)
Prepare session to be ready for training.
NNFW_STATUS nnfw_train(nnfw_session *session, bool update_weights)
Train the model.
NNFW_STATUS nnfw_train_set_traininfo(nnfw_session *session, const nnfw_train_info *info)
Set training information.
NNFW_STATUS nnfw_train_import_checkpoint(nnfw_session *session, const char *path)
NNFW_TYPE
Definition onert-micro.h:75
@ NNFW_TYPE_TENSOR_INT32
Definition onert-micro.h:79
@ NNFW_TYPE_TENSOR_FLOAT32
Definition onert-micro.h:77
#define NNFW_MAX_RANK
Maximum rank expressible with nnfw.
NNFW_STATUS nnfw_train_export_circle(nnfw_session *session, const char *path)
Export current training model into circle model.
tensor info describes the type and shape of tensors
NNFW_TYPE dtype
int32_t dims[NNFW_MAX_RANK]
Training information to prepare training.
tensor info describes the type and shape of tensors
int32_t dims[NNFW_MAX_RANK]