17#ifndef __ONERT_BACKEND_TRAIN_OPTIMIZER_SGD_H__
18#define __ONERT_BACKEND_TRAIN_OPTIMIZER_SGD_H__
48 explicit SGD() : _props{}, _learning_rate{0.01} {}
49 explicit SGD(
const Property &props) : _props{props}, _learning_rate{0.01} {}
50 explicit SGD(
double lr) : _props{}, _learning_rate{lr} {}
51 explicit SGD(
const Property &props,
double lr) : _props{props}, _learning_rate{lr} {}
59 std::string
name()
const override {
return std::string{
"SGD"}; }
85 double _learning_rate; // Step size applied per gradient update; defaults to 0.01 when no lr is passed to a constructor.
SGD(const Property &props, double lr)
exec::train::optimizer::UpdateFactors UpdateFactors
double getLearningRate(uint32_t iteration=0) const override
Get the learning rate.
std::string name() const override
Get the name of optimizer.
SGD(const Property &props)
void applyGradient(const UpdateFactors &factors) const override
Apply gradient to a trainable tensor.
virtual uint32_t getVarCount() const override
Get the number of optimizer variables.
Base class for all optimizers.
std::tuple< const backend::IPortableTensor &, backend::train::ITrainableTensor &, size_t > UpdateFactors