53 REQUIRE(learning_rate>0,
"Learning_rate (%f) must be positive\n",
60 REQUIRE(epsilon>=0,
"Epsilon (%f) must be non-negative\n",
67 REQUIRE(decay_factor>=0.0 && decay_factor<1.0,
68 "Decay factor (%f) must in [0,1)\n",
77 void AdaDeltaUpdater::init()
89 REQUIRE(context,
"Context must set\n");
95 std::string key=
"AdaDeltaUpdater::m_gradient_accuracy";
102 key=
"AdaDeltaUpdater::m_gradient_delta_accuracy";
109 REQUIRE(context,
"context must set\n");
111 std::string key=
"AdaDeltaUpdater::m_gradient_accuracy";
117 key=
"AdaDeltaUpdater::m_gradient_delta_accuracy";
128 "Index (%d) is invalid\n", idx);
130 "Index (%d) is invalid\n", idx);
143 REQUIRE(variable_reference.
vlen>0,
"variable_reference must set\n");
145 "The length of variable_reference (%d) and the length of gradient (%d) do not match\n",
146 variable_reference.
vlen,raw_negative_descend_direction.
vlen);
158 if(momentum_correction)
164 for(
index_t idx=0; idx<variable_reference.
vlen; idx++)
167 variable_reference[idx], raw_negative_descend_direction[idx], idx, learning_rate);
172 variable_reference[idx]+=pair.descend_direction;
virtual void save_data(const std::string &key, SGVector< float64_t > value)
virtual void set_decay_factor(float64_t decay_factor)
SGVector< float64_t > m_gradient_delta_accuracy
The class is used to serialize and deserialize variables for the optimization framework.
DescendCorrection * m_correction
virtual void load_from_context(CMinimizerContext *context)
virtual void initialize_previous_direction(index_t len)
virtual SGVector< float64_t > get_data_sgvector_float64(const std::string &key)
static const float64_t epsilon
virtual DescendPair get_corrected_descend_direction(float64_t negative_descend_direction, index_t idx)=0
virtual ~AdaDeltaUpdater()
virtual void update_context(CMinimizerContext *context)
virtual void load_from_context(CMinimizerContext *context)
virtual void set_epsilon(float64_t epsilon)
virtual void update_variable(SGVector< float64_t > variable_reference, SGVector< float64_t > raw_negative_descend_direction, float64_t learning_rate)
All classes and functions are contained in the shogun namespace.
This is a base class for momentum correction methods.
This is a base class for descend update with descend-based correction.
void scale(Matrix A, Matrix B, typename Matrix::Scalar alpha)
float64_t m_build_in_learning_rate
virtual void update_variable(SGVector< float64_t > variable_reference, SGVector< float64_t > raw_negative_descend_direction, float64_t learning_rate)
virtual bool is_initialized()
static float32_t sqrt(float32_t x)
virtual float64_t get_negative_descend_direction(float64_t variable, float64_t gradient, index_t idx, float64_t learning_rate)
SGVector< float64_t > m_gradient_accuracy
virtual void set_learning_rate(float64_t learning_rate)
void set_const(T const_elem)
virtual void update_context(CMinimizerContext *context)