#ifndef __ONERT_BACKEND_TRAIN_OPTIMIZER_ADAM_H__
#define __ONERT_BACKEND_TRAIN_OPTIMIZER_ADAM_H__

  explicit Adam() : _props{}, _learning_rate{0.001} {}
  explicit Adam(const Property &props) : _props{props}, _learning_rate{0.001} {}
  explicit Adam(double lr) : _props{}, _learning_rate{lr} {}
  explicit Adam(const Property &props, double lr) : _props{props}, _learning_rate{lr} {}

  std::string name() const override { return std::string{"Adam"}; }

  double _learning_rate;
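A minimal construction sketch, assuming only the constructors listed above and the onert::backend::train::optimizer namespace implied by the include guard; the helper function below is purely illustrative and not part of the header.

// Sketch only: builds an Adam optimizer with a caller-supplied learning rate.
#include <memory>

std::unique_ptr<onert::backend::train::optimizer::Adam> makeAdam(double lr)
{
  using onert::backend::train::optimizer::Adam;
  // Property defaults are kept; only the learning rate is overridden
  // (a default-constructed Adam would use 0.001).
  return std::make_unique<Adam>(lr);
}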
Related members and types:

  void applyGradient(const UpdateFactors &factors) const override
      Apply a gradient to a trainable tensor.
  virtual uint32_t getVarCount() const override
      Get the number of optimizer variables.
  std::string name() const override
      Get the name of the optimizer.
  double getLearningRate(uint32_t training_step) const override
      Get the learning rate.
  Adam(const Property &props), Adam(const Property &props, double lr)
      Constructors taking optimizer properties and, optionally, a learning rate.
  exec::train::optimizer::UpdateFactors UpdateFactors
      Alias for std::tuple<const backend::IPortableTensor &, backend::train::ITrainableTensor &, size_t>.
  Optimizer
      Base class for all optimizers.
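For reference, below is a sketch of the textbook Adam update (Kingma and Ba) that an applyGradient step conventionally performs on a parameter buffer. This is not the onert implementation; the beta1, beta2, and epsilon hyperparameters are assumptions about what Property would carry rather than fields confirmed by this excerpt.

// Standard Adam update on plain float buffers (illustrative only).
#include <cmath>
#include <cstddef>

void adamStep(float *param, float *m, float *v, const float *grad, std::size_t n,
              std::size_t step, float lr = 0.001f, float beta1 = 0.9f,
              float beta2 = 0.999f, float epsilon = 1e-7f)
{
  // Bias-correction terms for the 1-based training step.
  const float bc1 = 1.0f - std::pow(beta1, static_cast<float>(step));
  const float bc2 = 1.0f - std::pow(beta2, static_cast<float>(step));
  for (std::size_t i = 0; i < n; ++i)
  {
    m[i] = beta1 * m[i] + (1.0f - beta1) * grad[i];           // first moment estimate
    v[i] = beta2 * v[i] + (1.0f - beta2) * grad[i] * grad[i]; // second moment estimate
    const float m_hat = m[i] / bc1;
    const float v_hat = v[i] / bc2;
    param[i] -= lr * m_hat / (std::sqrt(v_hat) + epsilon);    // parameter update
  }
}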