ONE - On-device Neural Engine
Loading...
Searching...
No Matches
nnfw_experimental.h File Reference
#include "nnfw.h"

Go to the source code of this file.

Data Structures

struct  nnfw_operand
 
struct  nnfw_custom_kernel_params
 
struct  custom_kernel_registration_info
 
struct  nnfw_loss_info
 
struct  nnfw_train_info
 Training information to prepare training. More...
 

Typedefs

typedef void(* nnfw_custom_eval) (nnfw_custom_kernel_params *params, char *userdata, size_t userdata_size)
 
typedef struct nnfw_loss_info nnfw_loss_info
 
typedef struct nnfw_train_info nnfw_train_info
 Training information to prepare training.
 

Enumerations

enum  NNFW_TRAIN_LOSS { NNFW_TRAIN_LOSS_UNDEFINED = 0 , NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR = 1 , NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY = 2 }
 
enum  NNFW_TRAIN_LOSS_REDUCTION { NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED = 0 , NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE = 1 , NNFW_TRAIN_LOSS_REDUCTION_SUM = 2 }
 
enum  NNFW_TRAIN_OPTIMIZER { NNFW_TRAIN_OPTIMIZER_UNDEFINED = 0 , NNFW_TRAIN_OPTIMIZER_SGD = 1 , NNFW_TRAIN_OPTIMIZER_ADAM = 2 }
 
enum  NNFW_TRAIN_NUM_OF_TRAINABLE_OPS_SPECIAL_VALUES { NNFW_TRAIN_TRAINABLE_INCORRECT_STATE = -2 , NNFW_TRAIN_TRAINABLE_ALL = -1 , NNFW_TRAIN_TRAINABLE_NONE = 0 }
 Special values of num_of_trainable_ops. Positive values are used to indicate layers to be trained from the back of the graph. More...
 
enum  NNFW_QUANTIZE_TYPE {
  NNFW_QUANTIZE_TYPE_NOT_SET , NNFW_QUANTIZE_TYPE_U8_ASYM , NNFW_QUANTIZE_TYPE_I16_SYM , NNFW_QUANTIZE_TYPE_WO_I8_SYM ,
  NNFW_QUANTIZE_TYPE_WO_I16_SYM
}
 Convert between training mode and inference mode. More...
 
enum  NNFW_CODEGEN_PREF { NNFW_CODEGEN_PREF_DEFAULT , NNFW_CODEGEN_PREF_PERFORMANCE_FIRST , NNFW_CODEGEN_PREF_MEMORY_FIRST , NNFW_CODEGEN_PREF_COMPILE_TIME_FIRST }
 Preference for target-dependent code generation. More...
 
enum  NNFW_PREPARE_CONFIG { NNFW_PREPARE_CONFIG_PROFILE }
 Configuration key for prepare (compile and schedule) More...
 
enum  NNFW_RUN_CONFIG { NNFW_RUN_CONFIG_DUMP_MINMAX , NNFW_RUN_CONFIG_TRACE , NNFW_RUN_CONFIG_PROFILE }
 Configuration key for execution. More...
 

Functions

NNFW_STATUS nnfw_register_custom_op_info (nnfw_session *session, const char *id, custom_kernel_registration_info *info)
 
NNFW_STATUS nnfw_input_tensorindex (nnfw_session *session, const char *tensorname, uint32_t *index)
 Get the input tensor index by name.
 
NNFW_STATUS nnfw_output_tensorindex (nnfw_session *session, const char *tensorname, uint32_t *index)
 Get the output tensor index by name.
 
NNFW_STATUS nnfw_set_backends_per_operation (nnfw_session *session, const char *backend_settings)
 Set the backend for each operation in the session.
 
NNFW_STATUS nnfw_prepare_pipeline (nnfw_session *session, const char *map_file_path=nullptr)
 Prepare session to be ready for inference.
 
NNFW_STATUS nnfw_push_pipeline_input (nnfw_session *session, void *inputs, void *lengths)
 Set input buffer.
 
NNFW_STATUS nnfw_pop_pipeline_output (nnfw_session *session, void *outputs)
 Get last outputs of partitioned model in session.
 
NNFW_STATUS nnfw_train_get_traininfo (nnfw_session *session, nnfw_train_info *info)
 Get training information.
 
NNFW_STATUS nnfw_train_set_traininfo (nnfw_session *session, const nnfw_train_info *info)
 Set training information.
 
NNFW_STATUS nnfw_train_prepare (nnfw_session *session)
 Prepare session to be ready for training.
 
NNFW_STATUS nnfw_train_set_input (nnfw_session *session, uint32_t index, const void *input, const nnfw_tensorinfo *input_info)
 Set training input.
 
NNFW_STATUS nnfw_train_set_expected (nnfw_session *session, uint32_t index, const void *expected, const nnfw_tensorinfo *expected_info)
 Set training expected output.
 
NNFW_STATUS nnfw_train_set_output (nnfw_session *session, uint32_t index, NNFW_TYPE type, void *buffer, size_t length)
 Set training output buffer.
 
NNFW_STATUS nnfw_train (nnfw_session *session, bool update_weights)
 Train the model.
 
NNFW_STATUS nnfw_train_get_loss (nnfw_session *session, uint32_t index, float *loss)
 Get loss value for expected output.
 
NNFW_STATUS nnfw_train_export_circle (nnfw_session *session, const char *path)
 Export circle model.
 
NNFW_STATUS nnfw_train_import_checkpoint (nnfw_session *session, const char *path)
 Import circle checkpoint.
 
NNFW_STATUS nnfw_train_export_checkpoint (nnfw_session *session, const char *path)
 Export circle checkpoint.
 
NNFW_STATUS nnfw_train_input_tensorinfo (nnfw_session *session, uint32_t index, nnfw_tensorinfo *info)
 Get the training model input information.
 
NNFW_STATUS nnfw_train_expected_tensorinfo (nnfw_session *session, uint32_t index, nnfw_tensorinfo *info)
 Get the training model expected output information.
 
NNFW_STATUS nnfw_set_quantization_type (nnfw_session *session, NNFW_QUANTIZE_TYPE qtype)
 Set quantization type.
 
NNFW_STATUS nnfw_set_quantized_model_path (nnfw_session *session, const char *path)
 Set exported quantized model path.
 
NNFW_STATUS nnfw_quantize (nnfw_session *session)
 Quantize circle model.
 
NNFW_STATUS nnfw_set_codegen_model_path (nnfw_session *session, const char *path)
 Set exported codegen model path.
 
NNFW_STATUS nnfw_codegen (nnfw_session *session, const char *target, NNFW_CODEGEN_PREF pref)
 Generate target-dependent code.
 
NNFW_STATUS nnfw_set_odc_param_minmax_records_count (nnfw_session *session, int minmax_records_count)
 Set MinMax records count in auto compilation mode with on-device compiler.
 
NNFW_STATUS nnfw_odc_delete_minmax_file (nnfw_session *session)
 Delete MinMax file for on-device compiler.
 
NNFW_STATUS nnfw_run_with_auto_compilation (nnfw_session *session, const char *target, NNFW_CODEGEN_PREF pref)
 Run inference with auto compilation.
 
NNFW_STATUS nnfw_set_prepare_config (nnfw_session *session, NNFW_PREPARE_CONFIG key, const char *value)
 Set prepare configuration.
 
NNFW_STATUS nnfw_reset_prepare_config (nnfw_session *session)
 Reset prepare configurations.
 
NNFW_STATUS nnfw_set_execute_config (nnfw_session *session, const NNFW_RUN_CONFIG key, const char *value)
 Set execution (run or train) configuration.
 
NNFW_STATUS nnfw_reset_execute_config (nnfw_session *session)
 Reset execution (run or train) configurations.
 

Typedef Documentation

◆ nnfw_custom_eval

typedef void(* nnfw_custom_eval) (nnfw_custom_kernel_params *params, char *userdata, size_t userdata_size)

Definition at line 55 of file nnfw_experimental.h.

◆ nnfw_loss_info

◆ nnfw_train_info

Training information to prepare training.

Enumeration Type Documentation

◆ NNFW_CODEGEN_PREF

Preference for target-dependent code generation.

Enumerator
NNFW_CODEGEN_PREF_DEFAULT 

Use the default configuration

NNFW_CODEGEN_PREF_PERFORMANCE_FIRST 

Do best efforts to generate target-dependent code for performance

NNFW_CODEGEN_PREF_MEMORY_FIRST 

Do best efforts to generate target-dependent code for reducing host memory usage

NNFW_CODEGEN_PREF_COMPILE_TIME_FIRST 

Do best efforts to generate target-dependent code for reducing compilation time

Definition at line 516 of file nnfw_experimental.h.

517{
520 // TODO Support Traffic and Cycle code generation preference
NNFW_CODEGEN_PREF
Preference for target-dependent code generation.
@ NNFW_CODEGEN_PREF_DEFAULT
@ NNFW_CODEGEN_PREF_MEMORY_FIRST
@ NNFW_CODEGEN_PREF_COMPILE_TIME_FIRST
@ NNFW_CODEGEN_PREF_PERFORMANCE_FIRST

◆ NNFW_PREPARE_CONFIG

Configuration key for prepare (compile and schedule)

Enumerator
NNFW_PREPARE_CONFIG_PROFILE 

Prepare to dump execution time profile file (not require value setting) TODO: Use workspace

Definition at line 620 of file nnfw_experimental.h.

621{
NNFW_PREPARE_CONFIG
Configuration key for prepare (compile and schedule)
@ NNFW_PREPARE_CONFIG_PROFILE

◆ NNFW_QUANTIZE_TYPE

Convert between training mode and inference mode.

Note
This function should be called after nnfw_train or nnfw_prepare
Parameters
[in]sessionThe session to convert training mode to inference mode
[in]trainIf false, convert training model to inference model If true, convert inference model to training model
Returns
NNFW_STATUS_NO_ERROR if successful On-Device Quantization APIs

On-Device Quantization APIs are designed to be used in the following order

  1. nnfw_set_quantization_type
  2. nnfw_set_quantized_model_path
  3. nnfw_quantize

You should use Quantization APIs after nnfw_load_model_from_file, before nnfw_prepare and nnfw_set_input_tensorinfo.

quantization type

Enumerator
NNFW_QUANTIZE_TYPE_NOT_SET 

default value: type not set

NNFW_QUANTIZE_TYPE_U8_ASYM 

asymmetric quantization with a scale and zero point

NNFW_QUANTIZE_TYPE_I16_SYM 

symmetric quantization with a scale only

NNFW_QUANTIZE_TYPE_WO_I8_SYM 

weight-only int8 symmetric quantization

NNFW_QUANTIZE_TYPE_WO_I16_SYM 

weight-only int16 symmetric quantization

Definition at line 464 of file nnfw_experimental.h.

465{
476
NNFW_QUANTIZE_TYPE
Convert between training mode and inference mode.
@ NNFW_QUANTIZE_TYPE_WO_I16_SYM
@ NNFW_QUANTIZE_TYPE_U8_ASYM
@ NNFW_QUANTIZE_TYPE_I16_SYM
@ NNFW_QUANTIZE_TYPE_NOT_SET
@ NNFW_QUANTIZE_TYPE_WO_I8_SYM

◆ NNFW_RUN_CONFIG

Configuration key for execution.

Enumerator
NNFW_RUN_CONFIG_DUMP_MINMAX 

Dump minmax data for each layers to workspace (not require value setting)

NNFW_RUN_CONFIG_TRACE 

Dump execution event file to workspace (not require value setting)

NNFW_RUN_CONFIG_PROFILE 

Dump execution time profile file (not require value setting)

You should set prepare configuration NNFW_PREPARE_CONFIG_PROFILE before prepare. Otherwise, this configuration will be ignored.

TODO: Use workspace

Definition at line 657 of file nnfw_experimental.h.

658{
NNFW_RUN_CONFIG
Configuration key for execution.
@ NNFW_RUN_CONFIG_PROFILE
@ NNFW_RUN_CONFIG_TRACE
@ NNFW_RUN_CONFIG_DUMP_MINMAX

◆ NNFW_TRAIN_LOSS

Training C APIs

Training APIs are designed to be used in the following order for training

  1. nnfw_train_prepare
  2. nnfw_train_set_input, nnfw_train_set_expected for inputs & expected outputs
  3. nnfw_train
  4. nnfw_train_get_loss

If you want to inference after training with the same session, you can use the following order

  1. nnfw_set_input
  2. nnfw_set_output
  3. nnfw_run
Enumerator
NNFW_TRAIN_LOSS_UNDEFINED 
NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR 
NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY 

Definition at line 184 of file nnfw_experimental.h.

◆ NNFW_TRAIN_LOSS_REDUCTION

Enumerator
NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED 

Undefined

NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE 

Scalar sum divided by number of elements in losses

NNFW_TRAIN_LOSS_REDUCTION_SUM 

Scalar sum of weighted losses

Definition at line 191 of file nnfw_experimental.h.

◆ NNFW_TRAIN_NUM_OF_TRAINABLE_OPS_SPECIAL_VALUES

Special values of num_of_trainable_ops. Positive values are used to indicate layers to be trained from the back of the graph.

Enumerator
NNFW_TRAIN_TRAINABLE_INCORRECT_STATE 

Error value of number of trainable ops

NNFW_TRAIN_TRAINABLE_ALL 

All layers will be trained

NNFW_TRAIN_TRAINABLE_NONE 

No layer will be trained

Definition at line 218 of file nnfw_experimental.h.

219{
226
NNFW_TRAIN_NUM_OF_TRAINABLE_OPS_SPECIAL_VALUES
Special values of num_of_trainable_ops. Positive values are used to indicate layers to be trained fro...
@ NNFW_TRAIN_TRAINABLE_NONE
@ NNFW_TRAIN_TRAINABLE_ALL
@ NNFW_TRAIN_TRAINABLE_INCORRECT_STATE

◆ NNFW_TRAIN_OPTIMIZER

Enumerator
NNFW_TRAIN_OPTIMIZER_UNDEFINED 
NNFW_TRAIN_OPTIMIZER_SGD 
NNFW_TRAIN_OPTIMIZER_ADAM 

Definition at line 201 of file nnfw_experimental.h.

202{
NNFW_TRAIN_OPTIMIZER
@ NNFW_TRAIN_OPTIMIZER_ADAM
@ NNFW_TRAIN_OPTIMIZER_SGD
@ NNFW_TRAIN_OPTIMIZER_UNDEFINED

Function Documentation

◆ nnfw_codegen()

NNFW_STATUS nnfw_codegen ( nnfw_session session,
const char *  target,
NNFW_CODEGEN_PREF  pref 
)

Generate target-dependent code.

This function opens a dynamic shared object. It searches for the object following the ld.so(8) search rules. If nnfw_set_codegen_model_path is not called before this function, the codegen model path is automatically defined and used using the same directory of the original model/package with the target backend extension.

Parameters
[in]sessionnnfw_session the session which contains information about compilation
[in]targetTarget backend to generate code This target string will be used to find a backend library. The name of target backend library should follow the following rules: 'lib' + {backend extension} + '-gen' + {lib extension} And the target string should be a name except 'lib' and {lib extension}. For example, if the backend extension is 'aaa', the backend library should be 'libaaa-gen.so', and the target string should be 'aaa-gen'.
[in]prefNNFW_CODEGEN_PREF
Returns
NNFW_STATUS_NO_ERROR if successful, otherwise return NNFW_STATUS_ERROR

Definition at line 347 of file nnfw_api.cc.

348{
350 return session->codegen(target, pref);
351}
SessionID session(const coco::Module *m)
Definition Session.cpp:48
#define NNFW_RETURN_ERROR_IF_NULL(p)
Definition nnfw_api.cc:51

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_input_tensorindex()

NNFW_STATUS nnfw_input_tensorindex ( nnfw_session session,
const char *  tensorname,
uint32_t *  index 
)

Get the input tensor index by name.

This function finds an input tensor of the given name. If found, the index value is set to the address that index points to, and returns NNFW_STATUS_NO_ERROR. Otherwise, index is unchanged and returns NNFW_STATUS_ERROR .

Note
If two or more input tensors are of the same name, the one with the lowest index is always returned.
Parameters
[in]sessionthe session object
[in]tensornamethe name of the tensor to find, a null terminated char pointer string
[out]indexthe index to be returned
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 197 of file nnfw_api.cc.

198{
200 return session->input_tensorindex(tensorname, index);
201}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_odc_delete_minmax_file()

NNFW_STATUS nnfw_odc_delete_minmax_file ( nnfw_session session)

Delete MinMax file for on-device compiler.

Parameters
[in]sessionnnfw_session
Returns
NNFW_STATUS_NO_ERROR if successful, otherwise return NNFW_STATUS_ERROR

Definition at line 359 of file nnfw_api.cc.

360{
362 return session->delete_odc_minmax_file();
363}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_output_tensorindex()

NNFW_STATUS nnfw_output_tensorindex ( nnfw_session session,
const char *  tensorname,
uint32_t *  index 
)

Get the output tensor index by name.

This function finds an output tensor of the given name. If found, the index value is set to the address that index points to, and returns NNFW_STATUS_NO_ERROR. Otherwise, index is unchanged and returns NNFW_STATUS_ERROR .

Note
If two or more output tensors are of the same name, the one with the lowest index is always returned.
Parameters
[in]sessionthe session object
[in]tensornamethe name of the tensor to find, a null terminated char pointer string
[out]indexthe index to be returned
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 203 of file nnfw_api.cc.

204{
206 return session->output_tensorindex(tensorname, index);
207}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_pop_pipeline_output()

NNFW_STATUS nnfw_pop_pipeline_output ( nnfw_session session,
void *  outputs 
)

Get last outputs of partitioned model in session.

This function must be called after nnfw_prepare_pipeline, outputs given to this function must be cleared for memory management.

Parameters
[in]sessionSession from last outputs is to be extracted
[out]outputsRaw buffer for outputs, it must be std::vector<void *> type pointer for multiple output model
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 225 of file nnfw_api.cc.

226{
227 return nnfw_session::deprecated("nnfw_pop_pipeline_output: Deprecated");
228}
static NNFW_STATUS deprecated(const char *msg)

References nnfw_session::deprecated().

◆ nnfw_prepare_pipeline()

NNFW_STATUS nnfw_prepare_pipeline ( nnfw_session session,
const char *  map_file_path = nullptr 
)

Prepare session to be ready for inference.

This phase may finalize model compilation, scheduling, and additional settings.

Parameters
sessionthe session to be prepared
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 215 of file nnfw_api.cc.

216{
217 return nnfw_session::deprecated("nnfw_prepare_pipeline: Deprecated");
218}

References nnfw_session::deprecated().

◆ nnfw_push_pipeline_input()

NNFW_STATUS nnfw_push_pipeline_input ( nnfw_session session,
void *  inputs,
void *  lengths 
)

Set input buffer.

This function must be called after nnfw_prepare_pipeline, inputs given to this function can be reused for many inferences. lengths must be greater than or equal to what the operand requires. If you give empty inputs to this function, then this function will join all threads.

Parameters
[in]sessionSession to the input is to be set
[in]inputsRaw buffers for input, it must be std::vector<void *> type pointer for multiple input model
[in]lengthsSize of bytes of input buffers, it must be std::vector<uint32_t> type pointer for multiple input model
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 220 of file nnfw_api.cc.

221{
222 return nnfw_session::deprecated("nnfw_push_pipeline_input: Deprecated");
223}

References nnfw_session::deprecated().

◆ nnfw_quantize()

NNFW_STATUS nnfw_quantize ( nnfw_session session)

Quantize circle model.

Parameters
[in]sessionnnfw_session to quantize
Returns
NNFW_STATUS_NO_ERROR if successful, otherwise return NNFW_STATUS_ERROR

Definition at line 335 of file nnfw_api.cc.

336{
338 return session->quantize();
339}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_register_custom_op_info()

NNFW_STATUS nnfw_register_custom_op_info ( nnfw_session session,
const char *  id,
custom_kernel_registration_info info 
)

Definition at line 148 of file nnfw_api.cc.

150{
152 return session->register_custom_operation(id, info->eval_function);
153}
volatile const char info[]

References info, and NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_reset_execute_config()

NNFW_STATUS nnfw_reset_execute_config ( nnfw_session session)

Reset execution (run or train) configurations.

This function resets all execution configurations.

Parameters
[in]sessionnnfw_session to reset all execution configurations
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 394 of file nnfw_api.cc.

395{
397 return session->reset_execute_config();
398}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_reset_prepare_config()

NNFW_STATUS nnfw_reset_prepare_config ( nnfw_session session)

Reset prepare configurations.

This function resets all prepare configurations.

Parameters
[in]sessionnnfw_session to reset all prepare configurations
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 381 of file nnfw_api.cc.

382{
384 return session->reset_prepare_config();
385}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_run_with_auto_compilation()

NNFW_STATUS nnfw_run_with_auto_compilation ( nnfw_session session,
const char *  target,
NNFW_CODEGEN_PREF  pref 
)

Run inference with auto compilation.

This function runs inference with automatic compilation and replaces the original model with a quantized or compiled model inside. During the inference the minmax statistics is collected and after that quantization is performed. If quantization was successful, try to code generating for target backend, otherwise run original float model. If compilation was successful, run compiled model, otherwise run quantized model. On-device compiler (ODC) provides quantization and compilation functionality. Function should be called after model is loaded by nnfw_load_model_from_file, session is prepared for inference by nnfw_prepare, set input and output buffers by nnfw_set_input and nnfw_set_output.

Additionally the following parameters should be set up :

  1. Quantization type nnfw_set_quantization_type
  2. Quantizated model path nnfw_set_quantized_model_path
  3. Minmax records threshold for quantization nnfw_set_odc_param_minmax_records_count
  1. File with minMax statistics can be removed by nnfw_odc_delete_minmax_file
  2. Compiled model path nnfw_set_codegen_model_path
Parameters
[in]sessionnnfw_session
[in]targetTarget backend to generate code as in nnfw_codegen
[in]prefNNFW_CODEGEN_PREF
Returns
NNFW_STATUS_NO_ERROR if successful, otherwise return NNFW_STATUS_ERROR

Definition at line 365 of file nnfw_api.cc.

367{
369 return session->run_with_auto_compilation(target, pref);
370}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_set_backends_per_operation()

NNFW_STATUS nnfw_set_backends_per_operation ( nnfw_session session,
const char *  backend_settings 
)

Set the backend for each operation in the session.

This function assigns backends (acl_cl, acl_neon, cpu) to each operation in the session. If successful, the function returns NNFW_STATUS_NO_ERROR. Otherwise, the function returns NNFW_STATUS_ERROR.

Note
The argument specifying backends must be in the format "OP_BACKEND_MAP=\"0=acl_cl;1=cpu;2=acl_cl\"".
Parameters
[in]sessionthe session object
[in]backend_settingsString containing backend assignments indexed by operation sequence
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 209 of file nnfw_api.cc.

210{
212 return session->set_backends_per_operation(backend_settings);
213}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_set_codegen_model_path()

NNFW_STATUS nnfw_set_codegen_model_path ( nnfw_session session,
const char *  path 
)

Set exported codegen model path.

This function should be called before nnfw_codegen is invoked.

Parameters
[in]sessionnnfw_session to set codegen model path
[in]pathTarget-dependent model path
Returns
NNFW_STATUS_NO_ERROR if successful, otherwise return NNFW_STATUS_ERROR

Definition at line 341 of file nnfw_api.cc.

342{
344 return session->set_codegen_model_path(path);
345}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_set_execute_config()

NNFW_STATUS nnfw_set_execute_config ( nnfw_session session,
const NNFW_RUN_CONFIG  key,
const char *  value 
)

Set execution (run or train) configuration.

This function sets an execution configuration to dump execution data to the workspace. If you enable a configuration to dump execution data into the workspace and want to change the workspace, refer to nnfw_set_workspace to use the workspace directory.

Parameters
[in]sessionnnfw_session to set execution configuration
[in]keyexecution configuration key
[in]valueexecution configuration value if needed, otherwise set NULL
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 387 of file nnfw_api.cc.

389{
391 return session->set_execute_config(key, value);
392}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_set_odc_param_minmax_records_count()

NNFW_STATUS nnfw_set_odc_param_minmax_records_count ( nnfw_session session,
int  minmax_records_count 
)

Set MinMax records count in auto compilation mode with on-device compiler.

This function sets the MinMax records count for quantization in auto compilation mode. To enable automatic compilation mode, use nnfw_run_with_auto_compilation

Parameters
[in]sessionnnfw_session
[in]minmax_records_countminmax records count
Returns
NNFW_STATUS_NO_ERROR if successful, otherwise return NNFW_STATUS_ERROR

Definition at line 353 of file nnfw_api.cc.

354{
356 return session->set_odc_param_minmax_records_count(minmax_records_count);
357}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_set_prepare_config()

NNFW_STATUS nnfw_set_prepare_config ( nnfw_session session,
NNFW_PREPARE_CONFIG  key,
const char *  value 
)

Set prepare configuration.

This function sets a prepare configuration to decide additional compiling and scheduling features. If you enable a configuration to prepare dumping execution data into the workspace, refer to nnfw_set_workspace to use the workspace directory.

Parameters
[in]sessionnnfw_session to set prepare configuration
[in]keyprepare configuration key
[in]valueprepare configuration value
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 374 of file nnfw_api.cc.

376{
378 return session->set_prepare_config(key, value);
379}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_set_quantization_type()

NNFW_STATUS nnfw_set_quantization_type ( nnfw_session session,
NNFW_QUANTIZE_TYPE  qtype 
)

Set quantization type.

This function should be called before nnfw_quantize is invoked.

Parameters
[in]sessionnnfw_session to set quantization type
[in]qtypeNNFW_QUANTIZE_TYPE
Returns
NNFW_STATUS_NO_ERROR if successful, NNFW_STATUS_UNEXPECTED_NULL if session is null, otherwise return NNFW_STATUS_ERROR

Definition at line 323 of file nnfw_api.cc.

324{
326 return session->set_quantization_type(qtype);
327}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_set_quantized_model_path()

NNFW_STATUS nnfw_set_quantized_model_path ( nnfw_session session,
const char *  path 
)

Set exported quantized model path.

This function should be called before nnfw_quantize is invoked.

TODO: If this function is not called, quantized model will not be exported

Parameters
[in]sessionnnfw_session to set quantized model path
[in]pathQuantized model path
Returns
NNFW_STATUS_NO_ERROR if successful, otherwise return NNFW_STATUS_ERROR

Definition at line 329 of file nnfw_api.cc.

330{
332 return session->set_quantized_model_path(path);
333}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train()

NNFW_STATUS nnfw_train ( nnfw_session session,
bool  update_weights 
)

Train the model.

Note
This function should be called after nnfw_train_set_input and nnfw_train_set_expected for each input and expected output

In order to use update_weights as false, it should be called after nnfw_train_set_output.

Parameters
[in]sessionThe session to be trained
[in]update_weightsIf true, update weights of the model If false, do not update weights of the model (for validation)
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 399 of file onert-micro.cpp.

400{
401 return session->train_run(update_weights);
402}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_expected_tensorinfo()

NNFW_STATUS nnfw_train_expected_tensorinfo ( nnfw_session session,
uint32_t  index,
nnfw_tensorinfo info 
)

Get the training model expected output information.

Note
This function should be called after nnfw_train_prepare
Parameters
[in]sessionThe session to get the training model expected output information
[in]indexThe index of training model expected output
[out]infoThe shape and type of training model expected output
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 263 of file nnfw_api.cc.

265{
267 return session->train_expected_tensorinfo(index, info);
268}

References info, and NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_export_checkpoint()

NNFW_STATUS nnfw_train_export_checkpoint ( nnfw_session session,
const char *  path 
)

Export circle checkpoint.

Note
This function should be called on training mode This function should be called after nnfw_train
Parameters
[in]sessionThe session to export a checkpoint
[in]pathThe path to export a checkpoint
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 409 of file onert-micro.cpp.

410{
411 return session->train_export_checkpoint(path);
412}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_export_circle()

NNFW_STATUS nnfw_train_export_circle ( nnfw_session session,
const char *  path 
)

Export circle model.

Note
This function should be called on training mode This function should be called after nnfw_train
Parameters
[in]sessionThe session to export inference model
[in]pathThe path to export inference model
Returns
NNFW_STATUS_NO_ERROR if successful

Export circle model.

Note
This function should be called on training mode This function should be called after nnfw_train
Parameters
[in]sessionThe session to export inference model
[in]pathThe path to export inference model
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 404 of file onert-micro.cpp.

405{
406 return session->train_export_circle(path);
407}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_get_loss()

NNFW_STATUS nnfw_train_get_loss ( nnfw_session session,
uint32_t  index,
float *  loss 
)

Get loss value for expected output.

Note
This function should be called after nnfw_train
Parameters
[in]sessionThe session to get loss value
[in]indexThe index of loss value [0, number of expected outputs)
[out]lossThe loss value
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 433 of file onert-micro.cpp.

434{
436 return session->train_get_loss(index, loss);
437}
#define NNFW_RETURN_ERROR_IF_NULL(p)

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_get_traininfo()

NNFW_STATUS nnfw_train_get_traininfo ( nnfw_session session,
nnfw_train_info info 
)

Get training information.

Note
This function should be called after calling nnfw_load_model_from_file
   For the field which is not set in training information, it returns training information
   filled with default value. The default value of each field is as follows :
   learning_rate = 0.0f, batch_size = 0, *_UNDEF for other enums
Parameters
[in]sessionThe session to get training information
[out]infoTraining information
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 238 of file nnfw_api.cc.

239{
241 return session->train_get_traininfo(info);
242}

References info, and NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_import_checkpoint()

NNFW_STATUS nnfw_train_import_checkpoint ( nnfw_session session,
const char *  path 
)

Import circle checkpoint.

Note
This function should be called on training mode This function should be called before nnfw_train
Parameters
[in]sessionThe session to import a checkpoint
[in]pathThe path to import a checkpoint
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 414 of file onert-micro.cpp.

415{
416 return session->train_import_checkpoint(path);
417}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_input_tensorinfo()

NNFW_STATUS nnfw_train_input_tensorinfo ( nnfw_session session,
uint32_t  index,
nnfw_tensorinfo info 
)

Get the training model input information.

Note
This function should be called after nnfw_train_prepare
Parameters
[in]sessionThe session to get the training model input information
[in]indexThe index of training model input
[out]infoThe shape and type of training model input
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 256 of file nnfw_api.cc.

258{
260 return session->train_input_tensorinfo(index, info);
261}

References info, and NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_prepare()

NNFW_STATUS nnfw_train_prepare ( nnfw_session session)

Prepare session to be ready for training.

Note
The session will be entered into training mode
   If training info is NOT set in session, this function returns @c NNFW_STATUS_ERROR .
   You should set training info using {@link nnfw_train_set_traininfo}.
Parameters
[in]sessionThe session to be prepared for training
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 397 of file onert-micro.cpp.

397{ return session->train_prepare(); }

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_set_expected()

NNFW_STATUS nnfw_train_set_expected ( nnfw_session session,
uint32_t  index,
const void *  expected,
const nnfw_tensorinfo expected_info 
)

Set training expected output.

Note
This function should be called after nnfw_train_prepare
Parameters
sessionThe session to be set training inputs and expected model outputs
indexThe index of training expected output
expectedThe expected buffers for training
expected_infoThe shape and type of expected buffer If it is nullptr, it will not change shape and batch size
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 277 of file nnfw_api.cc.

279{
281 return session->train_set_expected(index, expected, expected_info);
282}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_set_input()

NNFW_STATUS nnfw_train_set_input ( nnfw_session session,
uint32_t  index,
const void *  input,
const nnfw_tensorinfo input_info 
)

Set training input.

Note
This function should be called after nnfw_train_prepare
Parameters
[in]sessionThe session to be set training inputs and expected model outputs
[in]indexThe index of training input
[in]inputThe input buffers for training
[in]input_infoThe shape and type of input buffer If it is nullptr, it will not change shape and batch size
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 270 of file nnfw_api.cc.

272{
274 return session->train_set_input(index, input, input_info);
275}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_set_output()

NNFW_STATUS nnfw_train_set_output ( nnfw_session session,
uint32_t  index,
NNFW_TYPE  type,
void *  buffer,
size_t  length 
)

Set training output buffer.

This function must be called after nnfw_train_prepare, buffer given to this function can be reused for training. length must be greater than or equal to what the operand requires. An output operand can have an unspecified shape and be deduced dynamically during the execution. You must provide a buffer large enough.

Parameters
[in]sessionSession from inference output is to be extracted
[in]indexIndex of output to be set (0-indexed)
[in]typeType of the output
[out]bufferRaw buffer for output
[in]lengthSize of bytes of output buffer
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 445 of file onert-micro.cpp.

447{
449 return session->train_set_output(index, type, buffer, length);
450}

References NNFW_RETURN_ERROR_IF_NULL.

◆ nnfw_train_set_traininfo()

NNFW_STATUS nnfw_train_set_traininfo ( nnfw_session session,
const nnfw_train_info info 
)

Set training information.

Note
This function should be called after calling nnfw_load_model_from_file and before calling nnfw_train_prepare
Parameters
[in]sessionThe session to be set training information
[in]infoThe training information
Returns
NNFW_STATUS_NO_ERROR if successful

Definition at line 439 of file onert-micro.cpp.

440{
442 return session->train_set_traininfo(info);
443}

References info, and NNFW_RETURN_ERROR_IF_NULL.