Parameter checks and the delegating update call in AdaGradUpdater:

    // set_learning_rate(): the rate must be strictly positive
    REQUIRE(learning_rate>0,
        "Learning_rate (%f) must be positive\n", learning_rate);

    // set_epsilon(): the ridge term must be non-negative
    REQUIRE(epsilon>=0,
        "Epsilon (%f) must be non-negative\n", epsilon);

    void AdaGradUpdater::init()

    // update_variable(): variable and direction must have the same length
    REQUIRE(variable_reference.vlen==raw_negative_descend_direction.vlen,
        "The length of variable (%d) and the length of negative descend direction (%d) do not match\n",
        variable_reference.vlen, raw_negative_descend_direction.vlen);

    DescendUpdaterWithCorrection::update_variable(variable_reference,
        raw_negative_descend_direction, learning_rate);
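For reference, the per-coordinate AdaGrad rule that get_negative_descend_direction is documented to compute can be sketched without the library. This is a minimal, self-contained sketch of the standard formula; the accumulator plays the role of m_gradient_accuracy, and the exact placement of epsilon may differ from the implementation:

    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Standard AdaGrad step for one coordinate: accumulate squared gradients
    // and scale the gradient by the inverse square root of the accumulator.
    double negative_descend_direction(std::vector<double>& accumulator,
                                      std::size_t idx, double gradient,
                                      double learning_rate, double epsilon)
    {
        accumulator[idx] += gradient * gradient;           // running sum of g^2
        return learning_rate * gradient /
               std::sqrt(accumulator[idx] + epsilon);      // per-coordinate step
    }

    int main()
    {
        std::vector<double> accumulator(1, 0.0);
        double x = 5.0;                                    // minimize f(x) = x^2
        for (int iter = 0; iter < 5; ++iter)
        {
            double g = 2.0 * x;                            // gradient of x^2
            x -= negative_descend_direction(accumulator, 0, g, 0.5, 1e-6);
            std::printf("iteration %d: x = %f\n", iter, x);
        }
        return 0;
    }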
virtual float64_t get_negative_descend_direction(float64_t variable, float64_t gradient, index_t idx, float64_t learning_rate)
virtual void update_variable(SGVector< float64_t > variable_reference, SGVector< float64_t > raw_negative_descend_direction, float64_t learning_rate)
virtual ~AdaGradUpdater()
SGVector< float64_t > m_gradient_accuracy
float64_t m_build_in_learning_rate
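A minimal usage sketch built only from the members and setters documented on this page. The header paths, Shogun initialization calls, stack construction, and the default constructor are assumptions; set_learning_rate, set_epsilon, set_const and update_variable are used with the signatures listed here:

    #include <shogun/base/init.h>                    // assumed header path
    #include <shogun/lib/SGVector.h>                 // assumed header path
    #include <shogun/optimization/AdaGradUpdater.h>  // assumed header path

    using namespace shogun;

    int main()
    {
        init_shogun_with_defaults();

        AdaGradUpdater updater;                // assumed default constructor
        updater.set_learning_rate(0.1);        // must be positive (see REQUIRE above)
        updater.set_epsilon(1e-6);             // must be non-negative

        SGVector<float64_t> variable(3);
        variable.set_const(1.0);               // current parameter values

        SGVector<float64_t> direction(3);
        direction.set_const(0.5);              // raw negative descend direction

        // Applies the AdaGrad-corrected step to variable in place.
        updater.update_variable(variable, direction, 0.1);

        exit_shogun();
        return 0;
    }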
All classes and functions are contained in the shogun namespace.
This is a base class for descend updates with descend-based correction.
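The pattern this brief describes can be illustrated without the library: the base class owns the vector-level update and delegates the per-coordinate step to a virtual hook, which concrete updaters such as AdaGradUpdater override. A library-free sketch follows; the names mirror the documented signatures, and the correction machinery itself is omitted:

    #include <cstddef>
    #include <vector>

    // Library-free stand-in for the base class: applies variable[i] -= step(i)
    // and leaves the per-coordinate step to derived classes.
    struct DescendUpdaterSketch
    {
        virtual ~DescendUpdaterSketch() {}
        virtual double get_negative_descend_direction(double variable, double gradient,
                                                      std::size_t idx, double learning_rate) = 0;

        void update_variable(std::vector<double>& variable,
                             const std::vector<double>& gradient, double learning_rate)
        {
            for (std::size_t i = 0; i < variable.size(); ++i)
                variable[i] -= get_negative_descend_direction(variable[i], gradient[i],
                                                              i, learning_rate);
        }
    };

    // Simplest possible derived updater: a constant-rate step with no correction.
    struct ConstantRateUpdater : DescendUpdaterSketch
    {
        double get_negative_descend_direction(double, double gradient,
                                              std::size_t, double learning_rate) override
        {
            return learning_rate * gradient;
        }
    };

    int main()
    {
        ConstantRateUpdater updater;
        std::vector<double> variable(2, 1.0), gradient(2, 0.5);
        updater.update_variable(variable, gradient, 0.1);   // variable becomes {0.95, 0.95}
        return 0;
    }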
void scale(Matrix A, Matrix B, typename Matrix::Scalar alpha)
static float32_t sqrt(float32_t x)
virtual void set_learning_rate(float64_t learning_rate)
void set_const(T const_elem)
virtual void set_epsilon(float64_t epsilon)
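The referenced helpers hint at how the update is assembled: set_const presumably zeroes the per-coordinate accumulator when it is first needed, sqrt appears in the per-coordinate scaling, and scale presumably multiplies a whole vector or matrix by a scalar factor. A plausible sketch of the first-use initialization, assuming only the SGVector interface listed above (illustrative, not the library source):

    #include <shogun/lib/SGVector.h>   // assumed header path
    #include <shogun/lib/common.h>     // assumed header path for float64_t / index_t

    using namespace shogun;

    // Illustrative helper: allocate one accumulator entry per variable and
    // zero it with set_const(), as a fresh m_gradient_accuracy would be.
    SGVector<float64_t> make_zero_accumulator(index_t vlen)
    {
        SGVector<float64_t> accumulator(vlen);
        accumulator.set_const(0.0);    // set_const(): fill the vector with a constant
        return accumulator;
    }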