ONE - On-device Neural Engine
onert-micro.h
/*
 * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _ONERT_MICRO_H_
#define _ONERT_MICRO_H_

#ifdef __cplusplus
extern "C" {
#endif
/*
 * typical training flow in onert-micro
 *
 * 1. load model or checkpoint
 * 1-1. (optional) configure training options
 * 2. feed training input / output (e.g. label) data (cf. unit of a step)
 * 3. train a step
 * 4. check loss
 * 4-0. save checkpoint for recovery / resuming training
 * 4-1. no more training -> go to 5
 * 4-2. more training -> go to 2
 * 5. save current state to inference model
 * 6. inference with inference model
// sample example
// 0. create context
nnfw_session *session;
nnfw_create_session(&session);
// 1. load model (and checkpoint if continuing training)
nnfw_load_model_from_file(session, MODEL_PATH);
// (optional) resume from a previously saved checkpoint
nnfw_train_import_checkpoint(session, CKPT_PATH);
// 1-1. (optional) configure training options (see the nnfw_train_set_traininfo sketch below)
nnfw_train_prepare(session);
float training_input[BATCH_SIZE*INPUT_SIZE];
float training_label[BATCH_SIZE*OUTPUT_SIZE];
// main training loop
for(int epoch = 0; epoch < NUM_EPOCHS; epoch++) {
  for(int step = 0; step < NUM_BATCHES; step++) {
    // prepare this step's input/label
    memcpy(training_input, train_input_data + THIS_BATCH_OFFSET, BATCH_SIZE*INPUT_SIZE*sizeof(float));
    memcpy(training_label, train_output_data + THIS_BATCH_OFFSET, BATCH_SIZE*OUTPUT_SIZE*sizeof(float));
    // 2. feed training input / expected output
    nnfw_train_set_input(session, 0, training_input, NULL);
    nnfw_train_set_expected(session, 0, training_label, NULL);
    // 3. train a step
    nnfw_train(session, true);
  }
  // 4. check loss
  float loss;
  nnfw_train_get_loss(session, 0, &loss);
  if(loss > TARGET_LOSS) {
    // 4-0. target not reached yet: save a checkpoint and keep training
    nnfw_train_export_checkpoint(session, CKPT_PATH);
  }
  else {
    // 5. target reached: save the current state as an inference (circle) model
    nnfw_train_export_circle(session, CIRCLE_PATH);
    break;
  }
}
*/
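
/*
 * A minimal sketch of step 1-1 above (configuring training options) before calling
 * nnfw_train_prepare(). It uses nnfw_train_set_traininfo() and the nnfw_train_info,
 * nnfw_loss_info and nnfw_adam_option types declared in this header. The concrete
 * values below (batch size, loss choice, Adam coefficients, number of trainable
 * operations) are illustrative assumptions, not defaults required by onert-micro.

nnfw_train_info tinfo = {0}; // zero-initialize; fields not set below keep their zero value
tinfo.batch_size = 32;
tinfo.loss_info.loss = NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY;
tinfo.loss_info.reduction_type = NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE;
tinfo.opt = NNFW_TRAIN_OPTIMIZER_ADAM;
tinfo.adam_opt.beta = 0.9f;     // first-moment decay rate
tinfo.adam_opt.beta2 = 0.999f;  // second-moment decay rate
tinfo.adam_opt.epsilon = 1e-7f; // numerical stability term
tinfo.num_trainble_ops = 10;    // number of operations to train (illustrative; see NNFW_TRAINABLE_OPS_MAX_SIZE)
nnfw_train_set_traininfo(session, &tinfo);
*/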

/* ... NNFW_TYPE and NNFW_STATUS enum definitions elided in this listing ... */

#define NNFW_MAX_RANK (6)

/* ... struct nnfw_tensorinfo definition elided in this listing ... */

// Essential APIs for training

/* ... NNFW_TRAIN_LOSS, NNFW_TRAIN_LOSS_REDUCTION and NNFW_TRAIN_OPTIMIZER enum
 * definitions and the struct nnfw_loss_info definition elided in this listing ... */

typedef struct nnfw_adam_option
{
  float beta;    // exponential decay rate for the first-moment estimates (beta1)
  float beta2;   // exponential decay rate for the second-moment estimates
  float epsilon; // small constant added for numerical stability
} nnfw_adam_option;

#define NNFW_TRAINABLE_OPS_MAX_SIZE (256)

/* ... struct nnfw_train_info definition and the declarations of nnfw_train_set_traininfo(),
 * nnfw_create_session() and nnfw_close_session() elided in this listing ... */

NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *package_file_path);

NNFW_STATUS nnfw_train_prepare(nnfw_session *session);

NNFW_STATUS nnfw_train(nnfw_session *session, bool update_weights);

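/*
 * A hedged sketch of the update_weights flag: passing false is assumed here to run a
 * training step without applying weight updates, e.g. to measure loss on held-out data
 * with the same session. valid_input / valid_label are hypothetical buffers prepared
 * like training_input / training_label in the sample at the top of this file.

nnfw_train_set_input(session, 0, valid_input, NULL);
nnfw_train_set_expected(session, 0, valid_label, NULL);
nnfw_train(session, false); // update_weights = false: weights are left unchanged
float valid_loss;
nnfw_train_get_loss(session, 0, &valid_loss);
*/
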
NNFW_STATUS nnfw_train_export_circle(nnfw_session *session, const char *path);

NNFW_STATUS nnfw_train_export_checkpoint(nnfw_session *session, const char *path);
NNFW_STATUS nnfw_train_import_checkpoint(nnfw_session *session, const char *path);

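/*
 * A sketch of the checkpoint round trip from steps 1 and 4-0 of the flow above:
 * export a checkpoint while training, then import it after reloading the model to
 * resume. MODEL_PATH / CKPT_PATH are placeholder paths as in the sample at the top.

// while training, e.g. once per epoch:
nnfw_train_export_checkpoint(session, CKPT_PATH);

// in a later run, resume from the saved state:
nnfw_load_model_from_file(session, MODEL_PATH);
nnfw_train_import_checkpoint(session, CKPT_PATH);
nnfw_train_prepare(session);
*/
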
NNFW_STATUS nnfw_train_set_input(nnfw_session *session, uint32_t index, void *input,
                                 const nnfw_tensorinfo *input_info);

NNFW_STATUS nnfw_train_set_expected(nnfw_session *session, uint32_t index, void *expected,
                                    const nnfw_tensorinfo *expected_info);

NNFW_STATUS nnfw_train_get_loss(nnfw_session *session, uint32_t index, float *loss);

NNFW_STATUS nnfw_train_set_output(nnfw_session *session, uint32_t index, NNFW_TYPE type,
                                  void *buffer, size_t length);

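/*
 * A minimal sketch of nnfw_train_set_output(): it is assumed to register a caller-owned
 * buffer that receives the model output of a training step, e.g. to compute accuracy in
 * addition to the loss. Sizes reuse the BATCH_SIZE / OUTPUT_SIZE placeholders from the
 * sample at the top of this file.

float output[BATCH_SIZE*OUTPUT_SIZE];
nnfw_train_set_output(session, 0, NNFW_TYPE_TENSOR_FLOAT32, output, sizeof(output));
nnfw_train(session, true);
// 'output' now holds this batch's predictions; compare against training_label as needed
*/
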
#ifdef __cplusplus
}
#endif

#endif //_ONERT_MICRO_H_
Brief descriptions of the symbols documented in this file:

NNFW_TYPE: Tensor types. Enumerators: NNFW_TYPE_TENSOR_FLOAT32, NNFW_TYPE_TENSOR_INT32.
NNFW_STATUS: Result values returned from a call to an API function. Enumerators: NNFW_STATUS_NO_ERROR, NNFW_STATUS_ERROR, NNFW_STATUS_UNEXPECTED_NULL, NNFW_STATUS_INVALID_STATE, NNFW_STATUS_OUT_OF_MEMORY, NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE, NNFW_STATUS_DEPRECATED_API.
NNFW_MAX_RANK: Maximum rank expressible with nnfw.
NNFW_TRAIN_LOSS: Enumerators: NNFW_TRAIN_LOSS_UNDEFINED, NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR, NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY.
NNFW_TRAIN_LOSS_REDUCTION: Enumerators: NNFW_TRAIN_LOSS_REDUCTION_UNDEFINED, NNFW_TRAIN_LOSS_REDUCTION_SUM_OVER_BATCH_SIZE, NNFW_TRAIN_LOSS_REDUCTION_SUM.
NNFW_TRAIN_OPTIMIZER: Enumerators: NNFW_TRAIN_OPTIMIZER_UNDEFINED, NNFW_TRAIN_OPTIMIZER_SGD, NNFW_TRAIN_OPTIMIZER_ADAM.
nnfw_tensorinfo: Tensor info describes the type and shape of tensors. Members include NNFW_TYPE dtype and int32_t dims[NNFW_MAX_RANK].
nnfw_loss_info: Members include NNFW_TRAIN_LOSS loss and NNFW_TRAIN_LOSS_REDUCTION reduction_type.
nnfw_train_info: Training information to prepare training. Members include uint32_t batch_size, nnfw_loss_info loss_info, NNFW_TRAIN_OPTIMIZER opt, nnfw_adam_option adam_opt, and uint32_t num_trainble_ops.
NNFW_STATUS nnfw_create_session(nnfw_session **session): Create a new session instance.
NNFW_STATUS nnfw_close_session(nnfw_session *session): Close a session instance.
NNFW_STATUS nnfw_load_model_from_file(nnfw_session *session, const char *package_file_path): Load model from nnpackage file or directory.
NNFW_STATUS nnfw_train_set_traininfo(nnfw_session *session, const nnfw_train_info *info): Set training information.
NNFW_STATUS nnfw_train_prepare(nnfw_session *session): Prepare session to be ready for training.
NNFW_STATUS nnfw_train_set_input(nnfw_session *session, uint32_t index, void *input, const nnfw_tensorinfo *input_info): Set training input.
NNFW_STATUS nnfw_train_set_expected(nnfw_session *session, uint32_t index, void *expected, const nnfw_tensorinfo *expected_info): Set training expected output.
NNFW_STATUS nnfw_train_set_output(nnfw_session *session, uint32_t index, NNFW_TYPE type, void *buffer, size_t length): Set training output buffer.
NNFW_STATUS nnfw_train(nnfw_session *session, bool update_weights): Train the model.
NNFW_STATUS nnfw_train_get_loss(nnfw_session *session, uint32_t index, float *loss): Get loss value for expected output.
NNFW_STATUS nnfw_train_export_circle(nnfw_session *session, const char *path): Export current training model into circle model.
NNFW_STATUS nnfw_train_export_checkpoint(nnfw_session *session, const char *path): Export training checkpoint.
NNFW_STATUS nnfw_train_import_checkpoint(nnfw_session *session, const char *path): Import training checkpoint.