56 REQUIRE(learning_rate>0,
"Learning_rate (%f) must be positive\n",
63 REQUIRE(epsilon>=0,
"Epsilon (%f) must be non-negative\n",
70 REQUIRE(decay_factor>=0.0 && decay_factor<1.0,
71 "Decay factor (%f) must in [0,1)\n",
80 void AdaDeltaUpdater::init()
90 SG_ADD(&m_gradient_delta_accuracy,
"AdaDeltaUpdater__m_gradient_delta_accuracy",
104 "Index (%d) is invalid\n", idx);
106 "Index (%d) is invalid\n", idx);
119 REQUIRE(variable_reference.
vlen>0,
"variable_reference must set\n");
121 "The length of variable_reference (%d) and the length of gradient (%d) do not match\n",
122 variable_reference.
vlen,raw_negative_descend_direction.
vlen);
134 if(momentum_correction)
140 for(
index_t idx=0; idx<variable_reference.
vlen; idx++)
143 variable_reference[idx], raw_negative_descend_direction[idx], idx, learning_rate);
148 variable_reference[idx]+=pair.descend_direction;
virtual void initialize_previous_direction(index_t len)
virtual void set_decay_factor(float64_t decay_factor)
SGVector< float64_t > m_gradient_delta_accuracy
DescendCorrection * m_correction
virtual DescendPair get_corrected_descend_direction(float64_t negative_descend_direction, index_t idx)=0
virtual void update_variable(SGVector< float64_t > variable_reference, SGVector< float64_t > raw_negative_descend_direction, float64_t learning_rate)
virtual ~AdaDeltaUpdater()
virtual void set_epsilon(float64_t epsilon)
All classes and functions are contained in the shogun namespace.
This is a base class for momentum correction methods.
This is a base class for descend update with descend based correction.
void scale(Matrix A, Matrix B, typename Matrix::Scalar alpha)
float64_t m_build_in_learning_rate
virtual void update_variable(SGVector< float64_t > variable_reference, SGVector< float64_t > raw_negative_descend_direction, float64_t learning_rate)
virtual bool is_initialized()
static float32_t sqrt(float32_t x)
virtual float64_t get_negative_descend_direction(float64_t variable, float64_t gradient, index_t idx, float64_t learning_rate)
SGVector< float64_t > m_gradient_accuracy
virtual void set_learning_rate(float64_t learning_rate)
void set_const(T const_elem)