namespace py = pybind11;
std::cout << "[ERROR]\tNNFW_STATUS_ERROR\n";
std::cout << "[ERROR]\tNNFW_STATUS_UNEXPECTED_NULL\n";
std::cout << "[ERROR]\tNNFW_STATUS_INVALID_STATE\n";
std::cout << "[ERROR]\tNNFW_STATUS_OUT_OF_MEMORY\n";
std::cout << "[ERROR]\tNNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE\n";
std::cout << "[ERROR]\tNNFW_STATUS_DEPRECATED_API\n";
if (!strcmp(layout, "NCHW"))
else if (!strcmp(layout, "NHWC"))
else if (!strcmp(layout, "NONE"))
std::cout << "[ERROR]\tLAYOUT_TYPE\n";
if (!strcmp(type, "float32"))
else if (!strcmp(type, "int32"))
else if (!strcmp(type, "uint8"))
  return NNFW_TYPE::NNFW_TYPE_TENSOR_UINT8;
else if (!strcmp(type, "bool"))
  return NNFW_TYPE::NNFW_TYPE_TENSOR_BOOL;
else if (!strcmp(type, "int64"))
  return NNFW_TYPE::NNFW_TYPE_TENSOR_INT64;
else if (!strcmp(type, "int8"))
  return NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED;
else if (!strcmp(type, "int16"))
  return NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED;
std::cout << "[ERROR] String to NNFW_TYPE Failure\n";
case NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM:
case NNFW_TYPE::NNFW_TYPE_TENSOR_UINT8:
case NNFW_TYPE::NNFW_TYPE_TENSOR_BOOL:
case NNFW_TYPE::NNFW_TYPE_TENSOR_INT64:
case NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
case NNFW_TYPE::NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED:
std::cout << "[ERROR] NNFW_TYPE to String Failure\n";
for (int32_t i = 0; i < tensor_info->rank; ++i)
  n *= tensor_info->dims[i];
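The loop above multiplies the used extents; a hedged complete form of num_elems(), with the accumulator starting at 1 (implied but not shown in the fragment):

  uint64_t num_elems(const nnfw_tensorinfo *tensor_info)
  {
    uint64_t n = 1;
    for (int32_t i = 0; i < tensor_info->rank; ++i)
      n *= tensor_info->dims[i];
    return n;
  }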
for (int32_t i = 0; i < tensor_info.rank; ++i)
  dims_list.append(tensor_info.dims[i]);
tensor_info.rank = py::len(array);
for (int32_t i = 0; i < tensor_info.rank; ++i)
  tensor_info.dims[i] = py::cast<int32_t>(array[i]);
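A hedged usage sketch of the two shape helpers above. tensorinfo is the wrapper's own struct (assumed default-constructible here), and an active Python interpreter is assumed, e.g. inside a bound method:

  tensorinfo ti;
  py::list shape;
  shape.append(1);
  shape.append(224);
  shape.append(224);
  shape.append(3);

  set_dims(ti, shape);                 // ti.rank == 4, ti.dims == {1, 224, 224, 3}
  py::list round_trip = get_dims(ti);  // [1, 224, 224, 3]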
const char *c_str_path = path.cast<std::string>().c_str();
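The expression above takes c_str() of a temporary std::string, so the pointer dangles as soon as the statement ends. A lifetime-safe variant keeps the std::string alive while the pointer is in use:

  std::string str_path = path.cast<std::string>();
  const char *c_str_path = str_path.c_str(); // valid while str_path is in scope
  // ... pass c_str_path to the nnfw C API before str_path goes out of scope ...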
nnfw_train_info train_get_traininfo()
void train_export_circle(const py::str &path)
tensorinfo input_tensorinfo(uint32_t index)
void train_import_checkpoint(const py::str &path)
void set_input_tensorinfo(uint32_t index, const tensorinfo *tensor_info)
void train(bool update_weights)
void set_input_layout(uint32_t index, const char *layout)
float train_get_loss(uint32_t index)
void train_set_traininfo(const nnfw_train_info *info)
void set_output_layout(uint32_t index, const char *layout)
tensorinfo output_tensorinfo(uint32_t index)
NNFW_SESSION(const char *package_file_path, const char *backends)
void train_export_checkpoint(const py::str &path)
volatile const char info[]
void set_dims(tensorinfo &tensor_info, const py::list &array)
Set nnfw_tensorinfo->dims.
const char * getStringType(NNFW_TYPE type)
NNFW_TYPE getType(const char *type="")
void ensure_status(NNFW_STATUS status)
Handle errors with NNFW_STATUS in API functions.
NNFW_LAYOUT getLayout(const char *layout="")
uint64_t num_elems(const nnfw_tensorinfo *tensor_info)
Get the total number of elements in nnfw_tensorinfo->dims.
py::list get_dims(const tensorinfo &tensor_info)
Get nnfw_tensorinfo->dims.
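The helpers and the NNFW_SESSION wrapper listed above are presumably exposed to Python through a pybind11 module. A minimal, hypothetical binding sketch; the module name, the property setup, and the subset of bound methods are illustrative assumptions, not the wrapper's actual binding code:

  #include <pybind11/pybind11.h>

  namespace py = pybind11;

  PYBIND11_MODULE(nnfw_api_sketch, m) // hypothetical module name
  {
    py::class_<tensorinfo>(m, "tensorinfo")
      .def(py::init<>()) // assumes tensorinfo is default-constructible
      .def_readwrite("rank", &tensorinfo::rank)
      .def_property("dims", &get_dims, &set_dims); // reuse the shape helpers above

    py::class_<NNFW_SESSION>(m, "nnfw_session")
      .def(py::init<const char *, const char *>()) // (package path, backends)
      .def("input_tensorinfo", &NNFW_SESSION::input_tensorinfo)
      .def("output_tensorinfo", &NNFW_SESSION::output_tensorinfo)
      .def("set_input_tensorinfo", &NNFW_SESSION::set_input_tensorinfo)
      .def("train", &NNFW_SESSION::train)
      .def("train_get_loss", &NNFW_SESSION::train_get_loss);
  }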
NNFW_STATUS nnfw_await(nnfw_session *session)
Wait for asynchronous run to finish.
NNFW_STATUS nnfw_run_async(nnfw_session *session)
Run inference asynchronously.
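A hedged usage sketch of the asynchronous pair above, assuming a prepared nnfw_session *session with input/output buffers already bound, and the ensure_status() helper described earlier:

  ensure_status(nnfw_run_async(session));
  // ... other work can proceed on this thread while inference runs ...
  ensure_status(nnfw_await(session));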
NNFW_STATUS nnfw_output_tensorinfo(nnfw_session *session, uint32_t index, nnfw_tensorinfo *tensor_info)
Get i-th output tensor info.
NNFW_STATUS nnfw_set_available_backends(nnfw_session *session, const char *backends)
Set available backends.
NNFW_STATUS nnfw_input_tensorinfo(nnfw_session *session, uint32_t index, nnfw_tensorinfo *tensor_info)
Get i-th input tensor info.
NNFW_STATUS nnfw_output_size(nnfw_session *session, uint32_t *number)
Get the number of outputs.
NNFW_STATUS nnfw_input_size(nnfw_session *session, uint32_t *number)
Get the number of inputs.
NNFW_STATUS nnfw_set_input_tensorinfo(nnfw_session *session, uint32_t index, const nnfw_tensorinfo *tensor_info)
Set input model's tensor info for resizing.
NNFW_STATUS nnfw_run(nnfw_session *session)
Run inference.
NNFW_STATUS nnfw_prepare(nnfw_session *session)
Prepare session to be ready for inference.
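A hedged sketch combining the query and resize calls above: read the first input's shape, override its batch dimension before nnfw_prepare(), then check how many outputs the prepared session exposes. `session` is assumed to hold a loaded but not yet prepared model, and the forced batch size is a placeholder:

  uint32_t num_inputs = 0;
  ensure_status(nnfw_input_size(session, &num_inputs));

  nnfw_tensorinfo ti;
  ensure_status(nnfw_input_tensorinfo(session, 0, &ti));
  ti.dims[0] = 1; // e.g. force batch size 1 (illustrative)
  ensure_status(nnfw_set_input_tensorinfo(session, 0, &ti));

  ensure_status(nnfw_prepare(session));

  uint32_t num_outputs = 0;
  ensure_status(nnfw_output_size(session, &num_outputs));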
NNFW_STATUS nnfw_set_output_layout(nnfw_session *session, uint32_t index, NNFW_LAYOUT layout)
Set the layout of an output.
NNFW_LAYOUT
Data format of a tensor.
@ NNFW_LAYOUT_CHANNELS_LAST
@ NNFW_LAYOUT_CHANNELS_FIRST
NNFW_STATUS nnfw_set_input_layout(nnfw_session *session, uint32_t index, NNFW_LAYOUT layout)
Set the layout of an input.
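A hedged usage sketch of the two layout setters, assuming NHWC buffers on both ends (NNFW_LAYOUT_CHANNELS_LAST corresponds to NHWC, NNFW_LAYOUT_CHANNELS_FIRST to NCHW):

  ensure_status(nnfw_set_input_layout(session, 0, NNFW_LAYOUT_CHANNELS_LAST));
  ensure_status(nnfw_set_output_layout(session, 0, NNFW_LAYOUT_CHANNELS_LAST));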
NNFW_STATUS nnfw_train_get_traininfo(nnfw_session *session, nnfw_train_info *info)
Get training information.
NNFW_STATUS nnfw_create_session(nnfw_session **session)
Create a new session instance.
NNFW_STATUS
Result values returned from a call to an API function.
@ NNFW_STATUS_INVALID_STATE
@ NNFW_STATUS_UNEXPECTED_NULL
@ NNFW_STATUS_DEPRECATED_API
@ NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE
@ NNFW_STATUS_OUT_OF_MEMORY
NNFW_STATUS nnfw_close_session(nnfw_session *session)
Close a session instance.
NNFW_STATUS nnfw_train_get_loss(nnfw_session *session, uint32_t index, float *loss)
Get loss value for expected output.
NNFW_STATUS nnfw_train_export_checkpoint(nnfw_session *session, const char *path)
Export current training state into a checkpoint file.
NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *package_file_path)
Load model from nnpackage file or directory.
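A hedged end-to-end sketch tying the session API entries above together. The "cpu" backend string and float32 I/O are assumptions, nnfw_set_input()/nnfw_set_output() belong to the same C API even though they are not listed in this reference, and ensure_status()/num_elems() are the helpers documented earlier:

  #include <nnfw.h>
  #include <vector>

  void run_once(const char *package_path)
  {
    nnfw_session *session = nullptr;
    ensure_status(nnfw_create_session(&session));
    ensure_status(nnfw_load_model_from_file(session, package_path));
    ensure_status(nnfw_set_available_backends(session, "cpu"));
    ensure_status(nnfw_prepare(session));

    nnfw_tensorinfo in_info, out_info;
    ensure_status(nnfw_input_tensorinfo(session, 0, &in_info));
    ensure_status(nnfw_output_tensorinfo(session, 0, &out_info));

    std::vector<float> input(num_elems(&in_info), 0.0f); // fill with real data
    std::vector<float> output(num_elems(&out_info));
    ensure_status(nnfw_set_input(session, 0, NNFW_TYPE_TENSOR_FLOAT32, input.data(),
                                 input.size() * sizeof(float)));
    ensure_status(nnfw_set_output(session, 0, NNFW_TYPE_TENSOR_FLOAT32, output.data(),
                                  output.size() * sizeof(float)));

    ensure_status(nnfw_run(session));
    ensure_status(nnfw_close_session(session));
  }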
NNFW_STATUS nnfw_train_prepare(nnfw_session *session)
Prepare session to be ready for training.
NNFW_STATUS nnfw_train(nnfw_session *session, bool update_weights)
Train the model.
NNFW_STATUS nnfw_train_set_traininfo(nnfw_session *session, const nnfw_train_info *info)
Set training information.
NNFW_STATUS nnfw_train_import_checkpoint(nnfw_session *session, const char *path)
Import training state from a previously exported checkpoint file.
@ NNFW_TYPE_TENSOR_FLOAT32
#define NNFW_MAX_RANK
Maximum rank expressible with nnfw.
NNFW_STATUS nnfw_train_export_circle(nnfw_session *session, const char *path)
Export current training model into circle model.
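A hedged sketch of a training flow built from the nnfw_train_* entries above. The epoch count and export paths are placeholders, and each nnfw_train() call assumes a batch of inputs and expected outputs has been bound beforehand (via nnfw_train_set_input()/nnfw_train_set_expected() from the same C API, not listed in this reference). `session` is assumed to hold a loaded trainable model:

  nnfw_train_info tri;
  ensure_status(nnfw_train_get_traininfo(session, &tri)); // read current defaults
  ensure_status(nnfw_train_set_traininfo(session, &tri)); // adjust fields first if needed
  ensure_status(nnfw_train_prepare(session));

  for (int epoch = 0; epoch < 5; ++epoch)
  {
    // ... bind one batch of inputs and expected outputs here ...
    ensure_status(nnfw_train(session, /*update_weights=*/true));

    float loss = 0.0f;
    ensure_status(nnfw_train_get_loss(session, 0, &loss)); // loss for expected output 0
  }

  ensure_status(nnfw_train_export_circle(session, "trained.circle"));
  ensure_status(nnfw_train_export_checkpoint(session, "trained.ckpt"));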
tensor info describes the type and shape of tensors
int32_t dims[NNFW_MAX_RANK]
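A hedged sketch of filling the struct described above for a rank-4 float32 tensor. The dtype member name follows nnfw.h (treat it as an assumption if your header differs), rank must not exceed NNFW_MAX_RANK, and only the first `rank` entries of dims are meaningful:

  nnfw_tensorinfo ti;
  ti.dtype = NNFW_TYPE_TENSOR_FLOAT32;
  ti.rank = 4; // <= NNFW_MAX_RANK
  ti.dims[0] = 1;
  ti.dims[1] = 224;
  ti.dims[2] = 224;
  ti.dims[3] = 3;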
Training information to prepare training.