ONE - On-device Neural Engine
train_with_dataset Namespace Reference

Functions

 initParse ()

 createOptimizer (optimizer_type, learning_rate=0.001, **kwargs)

 createLoss (loss_type, reduction="mean")

 train (args)
 

Variables

 args = initParse()
 

Function Documentation

◆ createLoss()

train_with_dataset.createLoss (loss_type, reduction = "mean")
Create a loss function based on the specified type and reduction.
Args:
    loss_type (str): The type of loss function ('mse', 'cce').
    reduction (str): Reduction type ('mean', 'sum').
Returns:
    object: An instance of the specified loss function.

Definition at line 54 of file train_with_dataset.py.

54 def createLoss(loss_type, reduction="mean"):
55     """
56     Create a loss function based on the specified type and reduction.
57     Args:
58         loss_type (str): The type of loss function ('mse', 'cce').
59         reduction (str): Reduction type ('mean', 'sum').
60     Returns:
61         object: An instance of the specified loss function.
62     """
63     if loss_type.lower() == "mse":
64         return losses.MeanSquaredError(reduction=reduction)
65     elif loss_type.lower() == "cce":
66         return losses.CategoricalCrossentropy(reduction=reduction)
67     else:
68         raise ValueError(f"Unknown loss type: {loss_type}")

Referenced by train().
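
A minimal usage sketch, assuming this script is importable as a module named train_with_dataset and that its losses backend is on the path (the import path is an assumption, not confirmed by this page):

# Hypothetical usage; the module import path is an assumption.
from train_with_dataset import createLoss

mse_fn = createLoss("MSE")                    # matching is case-insensitive; 'mean' reduction by default
cce_fn = createLoss("cce", reduction="sum")   # CategoricalCrossentropy with 'sum' reduction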

◆ createOptimizer()

train_with_dataset.createOptimizer (optimizer_type, learning_rate = 0.001, **kwargs)
Create an optimizer based on the specified type.
Args:
    optimizer_type (str): The type of optimizer ('SGD' or 'Adam').
    learning_rate (float): The learning rate for the optimizer.
    **kwargs: Additional parameters for the optimizer.
Returns:
    Optimizer: The created optimizer instance.

Definition at line 36 of file train_with_dataset.py.

36 def createOptimizer(optimizer_type, learning_rate=0.001, **kwargs):
37     """
38     Create an optimizer based on the specified type.
39     Args:
40         optimizer_type (str): The type of optimizer ('SGD' or 'Adam').
41         learning_rate (float): The learning rate for the optimizer.
42         **kwargs: Additional parameters for the optimizer.
43     Returns:
44         Optimizer: The created optimizer instance.
45     """
46     if optimizer_type.lower() == "sgd":
47         return optimizer.SGD(learning_rate=learning_rate, **kwargs)
48     elif optimizer_type.lower() == "adam":
49         return optimizer.Adam(learning_rate=learning_rate, **kwargs)
50     else:
51         raise ValueError(f"Unknown optimizer type: {optimizer_type}")

Referenced by train().
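
A minimal usage sketch under the same import-path assumption as above; note that any extra keyword arguments are forwarded unchanged to the underlying optimizer constructor:

# Hypothetical usage; the module import path is an assumption.
from train_with_dataset import createOptimizer

sgd_opt = createOptimizer("sgd", learning_rate=0.01)
adam_opt = createOptimizer("Adam")  # case-insensitive; default learning_rate=0.001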

◆ initParse()

train_with_dataset.initParse ( )
Parse the command-line arguments for the training script and return the parsed argparse namespace.

Definition at line 5 of file train_with_dataset.py.

 5 def initParse():
 6     parser = argparse.ArgumentParser()
 7     parser.add_argument('-m',
 8                         '--nnpkg',
 9                         required=True,
10                         help='Path to the nnpackage file or directory')
11     parser.add_argument('-i',
12                         '--input',
13                         required=True,
14                         help='Path to the file containing input data (e.g., .npy or raw)')
15     parser.add_argument(
16         '-l',
17         '--label',
18         required=True,
19         help='Path to the file containing label data (e.g., .npy or raw).')
20     parser.add_argument('--data_length', required=True, type=int, help='data length')
21     parser.add_argument('--backends', default='train', help='Backends to use')
22     parser.add_argument('--batch_size', default=16, type=int, help='batch size')
23     parser.add_argument('--epoch', default=5, type=int, help='epoch number')
24     parser.add_argument('--learning_rate', default=0.01, type=float, help='learning rate')
25     parser.add_argument('--loss', default='mse', choices=['mse', 'cce'])
26     parser.add_argument('--optimizer', default='sgd', choices=['sgd', 'adam'])
27     parser.add_argument('--loss_reduction_type', default='mean', choices=['mean', 'sum'])
28     parser.add_argument('--validation_split',
29                         default=0.0,
30                         type=float,
31                         help='validation split rate')
32
33     return parser.parse_args()
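
Since initParse() reads sys.argv, the sketch below drives it programmatically; the module import path and file names are placeholders:

# Hypothetical usage; import path and file names are assumptions.
import sys
from train_with_dataset import initParse

sys.argv = ["train_with_dataset.py", "-m", "model.nnpkg", "-i", "input.npy",
            "-l", "label.npy", "--data_length", "1000"]
args = initParse()
print(args.optimizer, args.learning_rate)  # defaults: 'sgd', 0.01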

◆ train()

train_with_dataset.train (args)
Main function to train the model.

Definition at line 71 of file train_with_dataset.py.

 71 def train(args):
 72     """
 73     Main function to train the model.
 74     """
 75     # Create session and load nnpackage
 76     sess = session(args.nnpkg, args.backends)
 77
 78     # Load data
 79     input_shape = sess.input_tensorinfo(0).dims
 80     label_shape = sess.output_tensorinfo(0).dims
 81
 82     input_shape[0] = args.data_length
 83     label_shape[0] = args.data_length
 84
 85     data_loader = DataLoader(args.input,
 86                              args.label,
 87                              args.batch_size,
 88                              input_shape=input_shape,
 89                              expected_shape=label_shape)
 90     print('Load data')
 91
 92     # optimizer
 93     opt_fn = createOptimizer(args.optimizer, args.learning_rate)
 94
 95     # loss
 96     loss_fn = createLoss(args.loss, reduction=args.loss_reduction_type)
 97
 98     sess.compile(optimizer=opt_fn,
 99                  loss=loss_fn,
100                  batch_size=args.batch_size,
101                  metrics=[metrics.CategoricalAccuracy()])
102
103     # Train model
104     total_time = sess.train(data_loader,
105                             epochs=args.epoch,
106                             validation_split=args.validation_split,
107                             checkpoint_path="checkpoint.ckpt")
108
109     # Print timing summary
110     print("=" * 35)
111     print(f"MODEL_LOAD takes {total_time['MODEL_LOAD']:.4f} ms")
112     print(f"COMPILE takes {total_time['COMPILE']:.4f} ms")
113     print(f"EXECUTE takes {total_time['EXECUTE']:.4f} ms")
114     epoch_times = total_time['EPOCH_TIMES']
115     for i, epoch_time in enumerate(epoch_times):
116         print(f"- Epoch {i + 1} takes {epoch_time:.4f} ms")
117     print("=" * 35)
118
119     print(f"nnpackage {args.nnpkg.split('/')[-1]} trains successfully.")

References createLoss() and createOptimizer().
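
Putting the pieces together, a sketch of how the script is driven end to end; the command line in the comment and the import path are placeholders:

# Hypothetical end-to-end invocation, e.g.:
#   python3 train_with_dataset.py -m model.nnpkg -i input.npy -l label.npy \
#       --data_length 1000 --optimizer adam --loss cce --epoch 10
from train_with_dataset import initParse, train

args = initParse()   # mirrors the module-level 'args' documented below
train(args)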

Variable Documentation

◆ args

train_with_dataset.args = initParse()

Definition at line 123 of file train_with_dataset.py.