ONE - On-device Neural Engine
nnfw_api_wrapper.h
/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __ONERT_API_PYTHON_NNFW_API_WRAPPER_H__
#define __ONERT_API_PYTHON_NNFW_API_WRAPPER_H__

#include "nnfw.h"
#include "nnfw_experimental.h"

#include <pybind11/stl.h>
#include <pybind11/numpy.h>

#include <cassert>

namespace onert
{
namespace api
{
namespace python
{

namespace py = pybind11;

// tensorinfo describes the type and shape of a tensor
struct tensorinfo
{
  // Data type, given as a string
  const char *dtype;
  // Number of dimensions (rank)
  int32_t rank;
  // Dimensions of the tensor; NNFW_MAX_RANK is the maximum rank expressible with nnfw
  int32_t dims[NNFW_MAX_RANK];
};

// Handle errors with NNFW_STATUS in API functions
void ensure_status(NNFW_STATUS status);

// Convert a layout string received from Python into NNFW_LAYOUT
NNFW_LAYOUT getLayout(const char *layout = "");

// Convert a type string received from Python into NNFW_TYPE
NNFW_TYPE getType(const char *type = "");

// Convert NNFW_TYPE into its string representation
const char *getStringType(NNFW_TYPE type);

// Get the total number of elements in nnfw_tensorinfo->dims
uint64_t num_elems(const nnfw_tensorinfo *tensor_info);

// Get nnfw_tensorinfo->dims as a Python list
py::list get_dims(const tensorinfo &tensor_info);

// Set nnfw_tensorinfo->dims from a Python list
void set_dims(tensorinfo &tensor_info, const py::list &array);

class NNFW_SESSION
{
private:
  nnfw_session *session;

public:
  NNFW_SESSION(const char *package_file_path, const char *backends);
  ~NNFW_SESSION();

  void close_session();
  void set_input_tensorinfo(uint32_t index, const tensorinfo *tensor_info);
  void prepare();
  void run();
  void run_async();
  void wait();
  // Process the input array according to the data type of the numpy array sent by Python
  // (int, float, ...)
  template <typename T> void set_input(uint32_t index, py::array_t<T> &buffer)
  {
    nnfw_tensorinfo tensor_info;
    nnfw_input_tensorinfo(this->session, index, &tensor_info);
    NNFW_TYPE type = tensor_info.dtype;
    uint32_t input_elements = num_elems(&tensor_info);
    size_t length = sizeof(T) * input_elements;

    ensure_status(nnfw_set_input(session, index, type, buffer.request().ptr, length));
  }
  // Process the output array according to the data type of the numpy array sent by Python
  // (int, float, ...)
  template <typename T> void set_output(uint32_t index, py::array_t<T> &buffer)
  {
    nnfw_tensorinfo tensor_info;
    nnfw_output_tensorinfo(this->session, index, &tensor_info);
    NNFW_TYPE type = tensor_info.dtype;
    uint32_t output_elements = num_elems(&tensor_info);
    size_t length = sizeof(T) * output_elements;

    ensure_status(nnfw_set_output(session, index, type, buffer.request().ptr, length));
  }
  uint32_t input_size();
  uint32_t output_size();
  // Process the input layout by receiving a string from Python instead of NNFW_LAYOUT
  void set_input_layout(uint32_t index, const char *layout);
  // Process the output layout by receiving a string from Python instead of NNFW_LAYOUT
  void set_output_layout(uint32_t index, const char *layout);
  tensorinfo input_tensorinfo(uint32_t index);
  tensorinfo output_tensorinfo(uint32_t index);

  // Experimental APIs for training
  void train_set_traininfo(const nnfw_train_info *info);

  template <typename T> void train_set_input(uint32_t index, py::array_t<T> &buffer)
  {
    nnfw_tensorinfo tensor_info;
    nnfw_input_tensorinfo(this->session, index, &tensor_info);

    py::buffer_info buf_info = buffer.request();
    const auto buf_shape = buf_info.shape;
    assert(tensor_info.rank == static_cast<int32_t>(buf_shape.size()) && buf_shape.size() > 0);
    tensor_info.dims[0] = static_cast<int32_t>(buf_shape.at(0));

    ensure_status(nnfw_train_set_input(this->session, index, buffer.request().ptr, &tensor_info));
  }
  template <typename T> void train_set_expected(uint32_t index, py::array_t<T> &buffer)
  {
    nnfw_tensorinfo tensor_info;
    nnfw_output_tensorinfo(this->session, index, &tensor_info);

    py::buffer_info buf_info = buffer.request();
    const auto buf_shape = buf_info.shape;
    assert(tensor_info.rank == static_cast<int32_t>(buf_shape.size()) && buf_shape.size() > 0);
    tensor_info.dims[0] = static_cast<int32_t>(buf_shape.at(0));

    ensure_status(
      nnfw_train_set_expected(this->session, index, buffer.request().ptr, &tensor_info));
  }
  template <typename T> void train_set_output(uint32_t index, py::array_t<T> &buffer)
  {
    nnfw_tensorinfo tensor_info;
    nnfw_output_tensorinfo(this->session, index, &tensor_info);
    NNFW_TYPE type = tensor_info.dtype;
    uint32_t output_elements = num_elems(&tensor_info);
    size_t length = sizeof(T) * output_elements;

    ensure_status(nnfw_train_set_output(session, index, type, buffer.request().ptr, length));
  }

  void train_prepare();
  void train(bool update_weights);
  float train_get_loss(uint32_t index);

  void train_export_circle(const py::str &path);
  void train_import_checkpoint(const py::str &path);
  void train_export_checkpoint(const py::str &path);

  // TODO Add other apis
};

} // namespace python
} // namespace api
} // namespace onert

#endif // __ONERT_API_PYTHON_NNFW_API_WRAPPER_H__
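
The header above only declares the wrapper types; the Python surface is produced by a pybind11 module that registers tensorinfo and NNFW_SESSION. Below is a minimal, illustrative sketch of such a binding. The module name nnfw_api_sketch and the exact set of registered overloads are assumptions for illustration, not the runtime's actual binding source; the key point is that templated members such as set_input<T> need one .def per supported element type so pybind11 can dispatch on the incoming numpy array's dtype.

// Illustrative pybind11 binding sketch (module name and registered overloads
// are assumptions, not the runtime's actual binding source).
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>

#include "nnfw_api_wrapper.h"

namespace py = pybind11;
using namespace onert::api::python;

PYBIND11_MODULE(nnfw_api_sketch, m)
{
  // Expose the plain tensorinfo struct; dims goes through the list helpers.
  py::class_<tensorinfo>(m, "tensorinfo")
    .def(py::init<>())
    .def_readonly("dtype", &tensorinfo::dtype) // exposed read-only in this sketch
    .def_readwrite("rank", &tensorinfo::rank)
    .def_property(
      "dims", [](const tensorinfo &ti) { return get_dims(ti); },
      [](tensorinfo &ti, const py::list &dims) { set_dims(ti, dims); });

  // Expose the session; one .def per element type lets pybind11 pick the
  // matching set_input<T>/set_output<T> instantiation from the numpy dtype.
  py::class_<NNFW_SESSION>(m, "nnfw_session")
    .def(py::init<const char *, const char *>(), py::arg("package_file_path"),
         py::arg("backends"))
    .def("prepare", &NNFW_SESSION::prepare)
    .def("run", &NNFW_SESSION::run)
    .def("set_input", &NNFW_SESSION::set_input<float>)
    .def("set_input", &NNFW_SESSION::set_input<int>)
    .def("set_output", &NNFW_SESSION::set_output<float>)
    .def("set_output", &NNFW_SESSION::set_output<int>);
}

With a module like this, a Python caller would roughly: construct nnfw_session(package_file_path, backends), call prepare(), bind numpy arrays with set_input/set_output, then call run().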