REQUIRE(learning_rate>0,
    "Learning rate (%f) must be positive\n", learning_rate);

REQUIRE(epsilon>=0,
    "Epsilon (%f) must be non-negative\n", epsilon);

REQUIRE(decay_factor>=0.0 && decay_factor<1.0,
    "Decay factor (%f) must be in [0,1)\n", decay_factor);
void RmsPropUpdater::init()

REQUIRE(idx>=0 && idx<m_gradient_accuracy.vlen,
    "Index (%d) is invalid\n", idx);
REQUIRE(variable_reference.vlen>0,
    "variable_reference must be set\n");

REQUIRE(variable_reference.vlen==raw_negative_descend_direction.vlen,
    "The length of variable_reference (%d) and the length of gradient (%d) do not match\n",
    variable_reference.vlen, raw_negative_descend_direction.vlen);
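These two preconditions are easy to reproduce outside Shogun. A sketch of the same argument checking over std::vector, reusing the hypothetical REQUIRE stand-in from the first sketch:

    #include <vector>

    // Reject an empty variable and a length mismatch before updating,
    // mirroring the two REQUIREs above. (%zu replaces %d because
    // std::vector::size() returns size_t.)
    void check_update_arguments(
        const std::vector<double>& variable_reference,
        const std::vector<double>& raw_negative_descend_direction)
    {
        REQUIRE(!variable_reference.empty(),
            "variable_reference must be set\n");
        REQUIRE(variable_reference.size() ==
                    raw_negative_descend_direction.size(),
            "The length of variable_reference (%zu) and the length of "
            "gradient (%zu) do not match\n",
            variable_reference.size(),
            raw_negative_descend_direction.size());
    }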
SGVector<float64_t> m_gradient_accuracy
virtual void set_decay_factor(float64_t decay_factor)
virtual float64_t get_negative_descend_direction(float64_t variable, float64_t gradient, index_t idx, float64_t learning_rate)
float64_t m_build_in_learning_rate
virtual void set_learning_rate(float64_t learning_rate)
virtual void update_variable(SGVector<float64_t> variable_reference, SGVector<float64_t> raw_negative_descend_direction, float64_t learning_rate)
virtual void set_epsilon(float64_t epsilon)
virtual ~RmsPropUpdater()
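Taken together, the members above suggest the following usage. This is a hedged example against Shogun's API: the header path and the default constructor are assumptions, while the setter names come from the listing above.

    #include <shogun/optimization/RmsPropUpdater.h>

    using namespace shogun;

    void configure_updater()
    {
        RmsPropUpdater updater;          // assumed default-constructible
        updater.set_learning_rate(0.01); // must be > 0
        updater.set_epsilon(1e-6);       // must be >= 0
        updater.set_decay_factor(0.9);   // must lie in [0,1)
        // A descend-based minimizer would then call update_variable()
        // once per iteration with the variable and its gradient.
    }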
All classes and functions are contained in the shogun namespace.
This is a base class for descend updates with descend-based correction.
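A minimal sketch of the contract such a base class implies, with simplified types (these are not Shogun's actual signatures): the base class drives the element-wise update, and subclasses like RmsPropUpdater supply the per-coordinate direction.

    #include <cstddef>
    #include <vector>

    class DescendUpdaterBase
    {
    public:
        virtual ~DescendUpdaterBase() = default;

        // Subclasses precondition the raw gradient coordinate-wise.
        virtual double get_negative_descend_direction(double variable,
            double gradient, std::size_t idx, double learning_rate) = 0;

        // Apply the (possibly corrected) direction to every coordinate.
        virtual void update_variable(std::vector<double>& variable,
            const std::vector<double>& gradient, double learning_rate)
        {
            for (std::size_t i = 0; i < variable.size(); ++i)
                variable[i] -= get_negative_descend_direction(
                    variable[i], gradient[i], i, learning_rate);
        }
    };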
void scale(Matrix A, Matrix B, typename Matrix::Scalar alpha)
static float32_t sqrt(float32_t x)
void set_const(T const_elem)
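These referenced helpers plausibly map onto the RMSProp update as follows: set_const() zero-fills the squared-gradient accumulator on first use, sqrt() forms the per-coordinate denominator, and scale() applies a scalar factor such as the learning rate. A standalone sketch of that composition; the mapping itself is an assumption.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    void rmsprop_vector_step(std::vector<double>& variable,
                             const std::vector<double>& gradient,
                             std::vector<double>& accumulator,
                             double learning_rate, double decay,
                             double epsilon)
    {
        // Lazy initialisation on the first call, in the role of
        // set_const(0.0) on a freshly allocated SGVector.
        if (accumulator.empty())
        {
            accumulator.resize(variable.size());
            std::fill(accumulator.begin(), accumulator.end(), 0.0);
        }
        for (std::size_t i = 0; i < variable.size(); ++i)
        {
            accumulator[i] = decay * accumulator[i]
                           + (1.0 - decay) * gradient[i] * gradient[i];
            // learning_rate plays the role of scale()'s alpha here.
            variable[i] -= learning_rate * gradient[i]
                         / std::sqrt(accumulator[i] + epsilon);
        }
    }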