ONE - On-device Neural Engine
|
#include "nnfw.h"
Go to the source code of this file.
Data Structures | |
struct | nnfw_operand |
struct | nnfw_custom_kernel_params |
struct | custom_kernel_registration_info |
struct | nnfw_loss_info |
struct | nnfw_train_info |
Training information to prepare training. More... | |
Typedefs | |
typedef void(* | nnfw_custom_eval) (nnfw_custom_kernel_params *params, char *userdata, size_t userdata_size) |
typedef struct nnfw_loss_info | nnfw_loss_info |
typedef struct nnfw_train_info | nnfw_train_info |
Training information to prepare training. | |
Functions | |
NNFW_STATUS | nnfw_register_custom_op_info (nnfw_session *session, const char *id, custom_kernel_registration_info *info) |
NNFW_STATUS | nnfw_input_tensorindex (nnfw_session *session, const char *tensorname, uint32_t *index) |
Get the input tensor index by name. | |
NNFW_STATUS | nnfw_output_tensorindex (nnfw_session *session, const char *tensorname, uint32_t *index) |
Get the output tensor index by name. | |
NNFW_STATUS | nnfw_set_backends_per_operation (nnfw_session *session, const char *backend_settings) |
Set the backend for each operation in the session. | |
NNFW_STATUS | nnfw_prepare_pipeline (nnfw_session *session, const char *map_file_path=nullptr) |
Prepare session to be ready for inference. | |
NNFW_STATUS | nnfw_push_pipeline_input (nnfw_session *session, void *inputs, void *lengths) |
Set input buffer. | |
NNFW_STATUS | nnfw_pop_pipeline_output (nnfw_session *session, void *outputs) |
Get last outputs of partitioned model in session. | |
NNFW_STATUS | nnfw_train_get_traininfo (nnfw_session *session, nnfw_train_info *info) |
Get training information. | |
NNFW_STATUS | nnfw_train_set_traininfo (nnfw_session *session, const nnfw_train_info *info) |
Set training information. | |
NNFW_STATUS | nnfw_train_prepare (nnfw_session *session) |
Prepare session to be ready for training. | |
NNFW_STATUS | nnfw_train_set_input (nnfw_session *session, uint32_t index, const void *input, const nnfw_tensorinfo *input_info) |
Set training input. | |
NNFW_STATUS | nnfw_train_set_expected (nnfw_session *session, uint32_t index, const void *expected, const nnfw_tensorinfo *expected_info) |
Set training expected output. | |
NNFW_STATUS | nnfw_train_set_output (nnfw_session *session, uint32_t index, NNFW_TYPE type, void *buffer, size_t length) |
Set training output buffer. | |
NNFW_STATUS | nnfw_train (nnfw_session *session, bool update_weights) |
Train the model. | |
NNFW_STATUS | nnfw_train_get_loss (nnfw_session *session, uint32_t index, float *loss) |
Get loss value for expected output. | |
NNFW_STATUS | nnfw_train_export_circle (nnfw_session *session, const char *path) |
Export circle model. | |
NNFW_STATUS | nnfw_train_import_checkpoint (nnfw_session *session, const char *path) |
Import circle checkpoint. | |
NNFW_STATUS | nnfw_train_export_checkpoint (nnfw_session *session, const char *path) |
Export circle checkpoint. | |
NNFW_STATUS | nnfw_train_input_tensorinfo (nnfw_session *session, uint32_t index, nnfw_tensorinfo *info) |
Get the training model input information. | |
NNFW_STATUS | nnfw_train_expected_tensorinfo (nnfw_session *session, uint32_t index, nnfw_tensorinfo *info) |
Get the training model expected output information. | |
NNFW_STATUS | nnfw_set_quantization_type (nnfw_session *session, NNFW_QUANTIZE_TYPE qtype) |
Set quantization type. | |
NNFW_STATUS | nnfw_set_quantized_model_path (nnfw_session *session, const char *path) |
Set exported quantized model path. | |
NNFW_STATUS | nnfw_quantize (nnfw_session *session) |
Quantize circle model. | |
NNFW_STATUS | nnfw_set_codegen_model_path (nnfw_session *session, const char *path) |
Set exported codegen model path. | |
NNFW_STATUS | nnfw_codegen (nnfw_session *session, const char *target, NNFW_CODEGEN_PREF pref) |
Generate target-dependent code. | |
NNFW_STATUS | nnfw_set_odc_param_minmax_records_count (nnfw_session *session, int minmax_records_count) |
Set MinMax records count in auto compilation mode with on-device compiler. | |
NNFW_STATUS | nnfw_odc_delete_minmax_file (nnfw_session *session) |
Delete MinMax file for on-device compiler. | |
NNFW_STATUS | nnfw_run_with_auto_compilation (nnfw_session *session, const char *target, NNFW_CODEGEN_PREF pref) |
Run inference with auto compilation. | |
NNFW_STATUS | nnfw_set_prepare_config (nnfw_session *session, NNFW_PREPARE_CONFIG key, const char *value) |
Set prepare configuration. | |
NNFW_STATUS | nnfw_reset_prepare_config (nnfw_session *session) |
Reset prepare configurations. | |
NNFW_STATUS | nnfw_set_execute_config (nnfw_session *session, const NNFW_RUN_CONFIG key, const char *value) |
Set execution (run or train) configuration. | |
NNFW_STATUS | nnfw_reset_execute_config (nnfw_session *session) |
Reset execution (run or train) configurations. | |
typedef void(* nnfw_custom_eval) (nnfw_custom_kernel_params *params, char *userdata, size_t userdata_size) |
Definition at line 55 of file nnfw_experimental.h.
typedef struct nnfw_loss_info nnfw_loss_info |
typedef struct nnfw_train_info nnfw_train_info |
Training information to prepare training.
enum NNFW_CODEGEN_PREF |
Preference for target-dependent code generation.
Definition at line 516 of file nnfw_experimental.h.
enum NNFW_PREPARE_CONFIG |
Configuration key for prepare (compile and schedule)
Enumerator | |
---|---|
NNFW_PREPARE_CONFIG_PROFILE | Prepare to dump execution time profile file (not require value setting) TODO: Use workspace |
Definition at line 620 of file nnfw_experimental.h.
enum NNFW_QUANTIZE_TYPE |
Convert between training mode and inference mode.
nnfw_train
or nnfw_prepare
[in] | session | The session to convert training mode to inference mode |
[in] | train | If false, convert training model to inference model If true, convert inference model to training model |
NNFW_STATUS_NO_ERROR
if successful. On-Device Quantization APIs are designed to be used in the following order
You should use Quantization APIs after nnfw_load_model_from_file
, before nnfw_prepare
and nnfw_set_input_tensorinfo
.
quantization type
Definition at line 464 of file nnfw_experimental.h.
enum NNFW_RUN_CONFIG |
Configuration key for execution.
Enumerator | |
---|---|
NNFW_RUN_CONFIG_DUMP_MINMAX | Dump minmax data for each layers to workspace (not require value setting) |
NNFW_RUN_CONFIG_TRACE | Dump execution event file to workspace (not require value setting) |
NNFW_RUN_CONFIG_PROFILE | Dump execution time profile file (not require value setting) You should set prepare configuration TODO: Use workspace |
Definition at line 657 of file nnfw_experimental.h.
enum NNFW_TRAIN_LOSS |
Training C APIs
Training APIs are designed to be used in the following order for training
If you want to inference after training with the same session, you can use the following order
Enumerator | |
---|---|
NNFW_TRAIN_LOSS_UNDEFINED | |
NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR | |
NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY |
Definition at line 184 of file nnfw_experimental.h.
Definition at line 191 of file nnfw_experimental.h.
Special values of num_of_trainable_ops. Positive values are used to indicate layers to be trained from the back of the graph.
Enumerator | |
---|---|
NNFW_TRAIN_TRAINABLE_INCORRECT_STATE | Error value of number of trainable ops |
NNFW_TRAIN_TRAINABLE_ALL | All layers will be trained |
NNFW_TRAIN_TRAINABLE_NONE | No layer will be trained |
Definition at line 218 of file nnfw_experimental.h.
enum NNFW_TRAIN_OPTIMIZER |
Enumerator | |
---|---|
NNFW_TRAIN_OPTIMIZER_UNDEFINED | |
NNFW_TRAIN_OPTIMIZER_SGD | |
NNFW_TRAIN_OPTIMIZER_ADAM |
Definition at line 201 of file nnfw_experimental.h.
NNFW_STATUS nnfw_codegen | ( | nnfw_session * | session, |
const char * | target, | ||
NNFW_CODEGEN_PREF | pref | ||
) |
Generate target-dependent code.
This function opens a dynamic shared object. It searches for the object following ld.so(8) search rules. If the nnfw_set_codegen_model_path
is not called before this function, the codegen model path is automatically defined and used using the same directory of the original model/package with the target backend extension.
[in] | session | nnfw_session the session which contains information about compilation |
[in] | target | Target backend to generate code This target string will be used to find a backend library. The name of target backend library should follow the following rules: 'lib' + {backend extension} + '-gen' + {lib extension} And the target string should be a name except 'lib' and {lib extension}. For example, if the backend extension is 'aaa', the backend library should be 'libaaa-gen.so', and the target string should be 'aaa-gen'. |
[in] | pref | NNFW_CODEGEN_PREF |
NNFW_STATUS_NO_ERROR
if successful, otherwise return NNFW_STATUS_ERROR
Definition at line 347 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_input_tensorindex | ( | nnfw_session * | session, |
const char * | tensorname, | ||
uint32_t * | index | ||
) |
Get the input tensor index by name.
This function finds an input tensor of the given name. If found, the index value is set to the address that index
points to, and returns NNFW_STATUS_NO_ERROR
. Otherwise, index
is unchanged and returns NNFW_STATUS_ERROR
.
[in] | session | the session object |
[in] | tensorname | the name of the tensor to find, a null terminated char pointer string |
[out] | index | the index to be returned |
NNFW_STATUS_NO_ERROR
if successful Definition at line 197 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_odc_delete_minmax_file | ( | nnfw_session * | session | ) |
Delete MinMax file for on-device compiler.
[in] | session | nnfw_session |
NNFW_STATUS_NO_ERROR
if successful, otherwise return NNFW_STATUS_ERROR
Definition at line 359 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_output_tensorindex | ( | nnfw_session * | session, |
const char * | tensorname, | ||
uint32_t * | index | ||
) |
Get the output tensor index by name.
This function finds an output tensor of the given name. If found, the index value is set to the address that index
points to, and returns NNFW_STATUS_NO_ERROR
. Otherwise, index
is unchanged and returns NNFW_STATUS_ERROR
.
[in] | session | the session object |
[in] | tensorname | the name of the tensor to find, a null terminated char pointer string |
[out] | index | the index to be returned |
NNFW_STATUS_NO_ERROR
if successful Definition at line 203 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_pop_pipeline_output | ( | nnfw_session * | session, |
void * | outputs | ||
) |
Get last outputs of partitioned model in session.
This function must be called after nnfw_prepare_pipeline
, outputs
given to this function must be cleared for memory management.
[in] | session | Session from last outputs is to be extracted |
[out] | outputs | Raw buffer for outputs, it must be std::vector<void *> type pointer for multiple output model |
NNFW_STATUS_NO_ERROR
if successful Definition at line 225 of file nnfw_api.cc.
References nnfw_session::deprecated().
NNFW_STATUS nnfw_prepare_pipeline | ( | nnfw_session * | session, |
const char * | map_file_path = nullptr |
||
) |
Prepare session to be ready for inference.
This phase may finalize model compilation, scheduling, and additional settings.
session | the session to be prepared |
Definition at line 215 of file nnfw_api.cc.
References nnfw_session::deprecated().
NNFW_STATUS nnfw_push_pipeline_input | ( | nnfw_session * | session, |
void * | inputs, | ||
void * | lengths | ||
) |
Set input buffer.
This function must be called after nnfw_prepare_pipeline
, inputs
given to this function can be reused for many inferences. lengths
must be greater or equal than the operand requires. if you give empty inputs
to this function, then this function will join all threads.
[in] | session | Session to the input is to be set |
[in] | inputs | Raw buffers for input, it must be std::vector<void *> type pointer for multiple input model |
[in] | lengths | Size of bytes of input buffers, it must be std::vector<uint32_t> type pointer for multiple input model |
NNFW_STATUS_NO_ERROR
if successful Definition at line 220 of file nnfw_api.cc.
References nnfw_session::deprecated().
NNFW_STATUS nnfw_quantize | ( | nnfw_session * | session | ) |
Quantize circle model.
[in] | session | nnfw_session to quantize |
NNFW_STATUS_NO_ERROR
if successful, otherwise return NNFW_STATUS_ERROR
Definition at line 335 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_register_custom_op_info | ( | nnfw_session * | session, |
const char * | id, | ||
custom_kernel_registration_info * | info | ||
) |
Definition at line 148 of file nnfw_api.cc.
References info, and NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_reset_execute_config | ( | nnfw_session * | session | ) |
Reset execution (run or train) configurations.
This function resets all execution configurations.
[in] | session | nnfw_session to reset all execution configurations |
NNFW_STATUS_NO_ERROR
if successful Definition at line 394 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_reset_prepare_config | ( | nnfw_session * | session | ) |
Reset prepare configurations.
This function resets all prepare configurations.
[in] | session | nnfw_session to reset all prepare configurations |
NNFW_STATUS_NO_ERROR
if successful Definition at line 381 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_run_with_auto_compilation | ( | nnfw_session * | session, |
const char * | target, | ||
NNFW_CODEGEN_PREF | pref | ||
) |
Run inference with auto compilation.
This function runs inference with automatic compilation and replaces the original model with a quantized or compiled model inside. During the inference the minmax statistics is collected and after that quantization is performed. If quantization was successful, try to code generating for target backend, otherwise run original float model. If compilation was successful, run compiled model, otherwise run quantized model. On-device compiler (ODC) provides quantization and compilation functionality. Function should be called after model is loaded by nnfw_load_model_from_file
, session is prepared for inference by nnfw_prepare
, set input and output buffers by nnfw_set_input
and nnfw_set_output
.
Additionally the following parameters should be set up :
nnfw_set_quantization_type
nnfw_set_quantized_model_path
nnfw_set_odc_param_minmax_records_count
nnfw_odc_delete_minmax_file
nnfw_set_codegen_model_path
[in] | session | nnfw_session |
[in] | target | Target backend to generate code as in nnfw_codegen |
[in] | pref | NNFW_CODEGEN_PREF |
NNFW_STATUS_NO_ERROR
if successful, otherwise return NNFW_STATUS_ERROR
Definition at line 365 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_set_backends_per_operation | ( | nnfw_session * | session, |
const char * | backend_settings | ||
) |
Set the backend for each operation in the session.
This function assigns backends (acl_cl, acl_neon, cpu) to each operation in the session. If successful, the function returns NNFW_STATUS_NO_ERROR
. Otherwise, the function returns NNFW_STATUS_ERROR
.
[in] | session | the session object |
[in] | backend_settings | String containing backend assignments indexed by operation sequence |
NNFW_STATUS_NO_ERROR
if successful Definition at line 209 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_set_codegen_model_path | ( | nnfw_session * | session, |
const char * | path | ||
) |
Set exported codegen model path.
This function should be called before nnfw_codegen
is invoked.
[in] | session | nnfw_session to set codegen model path |
[in] | path | Target-dependent model path |
NNFW_STATUS_NO_ERROR
if successful, otherwise return NNFW_STATUS_ERROR
Definition at line 341 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_set_execute_config | ( | nnfw_session * | session, |
const NNFW_RUN_CONFIG | key, | ||
const char * | value | ||
) |
Set execution (run or train) configuration.
This function sets execution configuration to dump execution data to workspace. If you enable configuration to dump execution data into workspace and want to change workspace, refer to nnfw_set_workspace
to use workspace directory.
[in] | session | nnfw_session to set execution configuration |
[in] | key | execution configuration key |
[in] | value | execution configuration value if needed, otherwise set NULL |
NNFW_STATUS_NO_ERROR
if successful Definition at line 387 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_set_odc_param_minmax_records_count | ( | nnfw_session * | session, |
int | minmax_records_count | ||
) |
Set MinMax records count in auto compilation mode with on-device compiler.
This function sets MinMax records count for quantization in auto compilation mode. To enable automatic compilation mode, use nnfw_run_with_auto_compilation
[in] | session | nnfw_session |
[in] | minmax_records_count | minmax records count |
NNFW_STATUS_NO_ERROR
if successful, otherwise return NNFW_STATUS_ERROR
Definition at line 353 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_set_prepare_config | ( | nnfw_session * | session, |
NNFW_PREPARE_CONFIG | key, | ||
const char * | value | ||
) |
Set prepare configuration.
This function sets prepare configuration to decide additional compiling and scheduling features. If you enable configuration to prepare dumping execution data into workspace, refer to nnfw_set_workspace
to use workspace directory.
[in] | session | nnfw_session to set prepare configuration |
[in] | key | prepare configuration key |
[in] | value | prepare configuration value |
NNFW_STATUS_NO_ERROR
if successful Definition at line 374 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_set_quantization_type | ( | nnfw_session * | session, |
NNFW_QUANTIZE_TYPE | qtype | ||
) |
Set quantization type.
This function should be called before nnfw_quantize
is invoked.
[in] | session | nnfw_session to set quantization type |
[in] | qtype | NNFW_QUANTIZE_TYPE |
NNFW_STATUS_NO_ERROR
if successful, NNFW_STATUS_UNEXPECTED_NULL
if session is null, otherwise return NNFW_STATUS_ERROR
Definition at line 323 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_set_quantized_model_path | ( | nnfw_session * | session, |
const char * | path | ||
) |
Set exported quantized model path.
This function should be called before nnfw_quantize
is invoked.
TODO: If this function is not called, quantized model will not be exported
[in] | session | nnfw_session to set quantized model path |
[in] | path | Quantized model path |
NNFW_STATUS_NO_ERROR
if successful, otherwise return NNFW_STATUS_ERROR
Definition at line 329 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train | ( | nnfw_session * | session, |
bool | update_weights | ||
) |
Train the model.
nnfw_train_set_input
and nnfw_train_set_expected
for each input and expected output. In order to use update_weights
as false, it should be called after nnfw_train_set_output
.
[in] | session | The session to be trained |
[in] | update_weights | If true, update weights of the model If false, do not update weights of the model (for validation) |
NNFW_STATUS_NO_ERROR
if successful Definition at line 399 of file onert-micro.cpp.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_expected_tensorinfo | ( | nnfw_session * | session, |
uint32_t | index, | ||
nnfw_tensorinfo * | info | ||
) |
Get the training model expected output information.
nnfw_train_prepare
[in] | session | The session to get the training model expected output information |
[in] | index | The index of training model expected output |
[out] | info | The shape and type of training model expected output |
NNFW_STATUS_NO_ERROR
if successful Definition at line 263 of file nnfw_api.cc.
References info, and NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_export_checkpoint | ( | nnfw_session * | session, |
const char * | path | ||
) |
Export circle checkpoint.
nnfw_train
[in] | session | The session to export a checkpoint |
[in] | path | The path to export a checkpoint |
NNFW_STATUS_NO_ERROR
if successful Definition at line 409 of file onert-micro.cpp.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_export_circle | ( | nnfw_session * | session, |
const char * | path | ||
) |
Export circle model.
nnfw_train
[in] | session | The session to export inference model |
[in] | path | The path to export inference model |
NNFW_STATUS_NO_ERROR
if successful. Export circle model.
nnfw_train
[in] | session | The session to export inference model |
[in] | path | The path to export inference model |
NNFW_STATUS_NO_ERROR
if successful Definition at line 404 of file onert-micro.cpp.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_get_loss | ( | nnfw_session * | session, |
uint32_t | index, | ||
float * | loss | ||
) |
Get loss value for expected output.
nnfw_train
[in] | session | The session to get loss value |
[in] | index | The index of loss value [0, number of expected outputs) |
[out] | loss | The loss value |
NNFW_STATUS_NO_ERROR
if successful Definition at line 433 of file onert-micro.cpp.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_get_traininfo | ( | nnfw_session * | session, |
nnfw_train_info * | info | ||
) |
Get training information.
nnfw_load_model_from_file
For the field which is not set in training information, it returns training information filled with default value. The default value of each field is as follows : learning_rate = 0.0f, batch_size = 0, *_UNDEF for other enums
[in] | session | The session to get training information |
[out] | info | Training information |
NNFW_STATUS_NO_ERROR
if successful Definition at line 238 of file nnfw_api.cc.
References info, and NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_import_checkpoint | ( | nnfw_session * | session, |
const char * | path | ||
) |
Import circle checkpoint.
nnfw_train
[in] | session | The session to import a checkpoint |
[in] | path | The path to import a checkpoint |
NNFW_STATUS_NO_ERROR
if successful Definition at line 414 of file onert-micro.cpp.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_input_tensorinfo | ( | nnfw_session * | session, |
uint32_t | index, | ||
nnfw_tensorinfo * | info | ||
) |
Get the training model input information.
nnfw_train_prepare
[in] | session | The session to get the training model input information |
[in] | index | The index of training model input |
[out] | info | The shape and type of training model input |
NNFW_STATUS_NO_ERROR
if successful Definition at line 256 of file nnfw_api.cc.
References info, and NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_prepare | ( | nnfw_session * | session | ) |
Prepare session to be ready for training.
If training info is NOT set in session, this function returns NNFW_STATUS_ERROR. You should set training info using nnfw_train_set_traininfo.
[in] | session | The session to be prepared for training |
NNFW_STATUS_NO_ERROR
if successful Definition at line 397 of file onert-micro.cpp.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_set_expected | ( | nnfw_session * | session, |
uint32_t | index, | ||
const void * | expected, | ||
const nnfw_tensorinfo * | expected_info | ||
) |
Set training expected output.
nnfw_train_prepare
session | The session to be set training inputs and expected model outputs |
index | The index of training expected output |
expected | The expected buffers for training |
expected_info | The shape and type of expected buffer If it is nullptr, it will not change shape and batch size |
NNFW_STATUS_NO_ERROR
if successful Definition at line 277 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_set_input | ( | nnfw_session * | session, |
uint32_t | index, | ||
const void * | input, | ||
const nnfw_tensorinfo * | input_info | ||
) |
Set training input.
nnfw_train_prepare
[in] | session | The session to be set training inputs and expected model outputs |
[in] | index | The index of training input |
[in] | input | The input buffers for training |
[in] | input_info | The shape and type of input buffer If it is nullptr, it will not change shape and batch size |
NNFW_STATUS_NO_ERROR
if successful Definition at line 270 of file nnfw_api.cc.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_set_output | ( | nnfw_session * | session, |
uint32_t | index, | ||
NNFW_TYPE | type, | ||
void * | buffer, | ||
size_t | length | ||
) |
Set training output buffer.
This function must be called after nnfw_train_prepare
, buffer
given to this function can be reused for training. length
must be greater or equal than the operand requires. An output operand can have unspecified shape and deduced dynamically during the execution. You must provide buffer
large enough.
[in] | session | Session from inference output is to be extracted |
[in] | index | Index of output to be set (0-indexed) |
[in] | type | Type of the output |
[out] | buffer | Raw buffer for output |
[in] | length | Size of bytes of output buffer |
NNFW_STATUS_NO_ERROR
if successful Definition at line 445 of file onert-micro.cpp.
References NNFW_RETURN_ERROR_IF_NULL.
NNFW_STATUS nnfw_train_set_traininfo | ( | nnfw_session * | session, |
const nnfw_train_info * | info | ||
) |
Set training information.
nnfw_load_model_from_file
and before calling nnfw_train_prepare
[in] | session | The session to be set training information |
[in] | info | The training information |
NNFW_STATUS_NO_ERROR
if successful Definition at line 439 of file onert-micro.cpp.
References info, and NNFW_RETURN_ERROR_IF_NULL.