64             "The length (%d) of dual variable must match the length (%d) of variable\n",
 
   68     REQUIRE(penalty_type,
"For now only L1Penalty is supported. Please use the penalty for this minimizer\n");
 
   71     REQUIRE(fun,
"the cost function must be a stochastic cost function\n");
 
   90 void SMIDASMinimizer::init()
 
  105             SG_SWARNING(
"There is not theoretical guarantee when Descend Correction is enabled\n");
 
  109         if(!gradient_updater)
 
  111             SG_SWARNING(
"There is not theoretical guarantee when this updater is used\n");
 
  116         SG_SWARNING(
"There is not theoretical guarantee when this updater is used\n");
 
virtual SGVector< float64_t > get_gradient()=0
 
The class implements the stochastic mirror descent (SMD) minimizer.
 
DescendUpdater * m_gradient_updater
 
virtual void update_variable(SGVector< float64_t > variable, SGVector< float64_t > dual_variable)=0
 
This is the base class for L1 penalty/regularization within the FirstOrderMinimizer framework...
 
float64_t m_penalty_weight
 
virtual void begin_sample()=0
 
FirstOrderCostFunction * m_fun
 
virtual void init_minimization()
 
virtual void update_variable(SGVector< float64_t > variable_reference, SGVector< float64_t > negative_descend_direction, float64_t learning_rate)=0
 
The first-order stochastic cost function base class.
 
virtual float64_t minimize()
 
virtual ~SMIDASMinimizer()
 
MappingFunction * m_mapping_fun
 
virtual void init_minimization()
 
LearningRate * m_learning_rate
 
virtual SGVector< float64_t > get_dual_variable(SGVector< float64_t > variable)=0
 
virtual float64_t get_penalty(SGVector< float64_t > var)
 
virtual float64_t get_cost()=0
 
virtual void update_variable_for_proximity(SGVector< float64_t > variable, float64_t proximal_weight)
 
The class implements the gradient descent method.
 
All classes and functions are contained in the shogun namespace.
 
SGVector< float64_t > m_dual_variable
 
virtual bool next_sample()=0
 
This is a base class for descent updates with descent-based correction.
 
virtual bool enables_descend_correction()
 
virtual float64_t get_learning_rate(int32_t iter_counter)=0
 
virtual SGVector< float64_t > obtain_variable_reference()=0