SHOGUN  4.1.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
SMIDASMinimizer.cpp
Go to the documentation of this file.
1  /*
2  * Copyright (c) The Shogun Machine Learning Toolbox
3  * Written (w) 2015 Wu Lin
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice, this
10  * list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * The views and conclusions contained in the software and documentation are those
27  * of the authors and should not be interpreted as representing official policies,
28  * either expressed or implied, of the Shogun Development Team.
29  *
30  */
32 #include <shogun/lib/config.h>
35 using namespace shogun;
36 
38  :SMDMinimizer()
39 {
40  init();
41 }
42 
44 {
45 }
46 
48  :SMDMinimizer(fun)
49 {
50  init();
51 }
52 
54 {
55  REQUIRE(m_mapping_fun, "Mapping function must set\n");
58 
59  if(m_dual_variable.vlen==0)
60  m_dual_variable=m_mapping_fun->get_dual_variable(variable_reference);
61  else
62  {
63  REQUIRE(m_dual_variable.vlen==variable_reference.vlen,
64  "The length (%d) of dual variable must match the length (%d) of variable\n",
65  m_dual_variable.vlen, variable_reference.vlen);
66  }
67  L1Penalty* penalty_type=dynamic_cast<L1Penalty*>(m_penalty_type);
68  REQUIRE(penalty_type,"For now only L1Penalty is supported. Please use the penalty for this minimizer\n");
69 
71  REQUIRE(fun,"the cost function must be a stochastic cost function\n");
73  {
74  fun->begin_sample();
75  while(fun->next_sample())
76  {
79 
83  m_mapping_fun->update_variable(variable_reference, m_dual_variable);
84  }
85  }
86  float64_t cost=m_fun->get_cost();
87  return cost+get_penalty(variable_reference);
88 }
89 
90 void SMIDASMinimizer::init()
91 {
93 }
94 
96 {
100 
101  if(updater)
102  {
103  if (updater->enables_descend_correction())
104  {
105  SG_SWARNING("There is not theoretical guarantee when Descend Correction is enabled\n");
106  }
107  GradientDescendUpdater* gradient_updater=
109  if(!gradient_updater)
110  {
111  SG_SWARNING("There is not theoretical guarantee when this updater is used\n");
112  }
113  }
114  else
115  {
116  SG_SWARNING("There is not theoretical guarantee when this updater is used\n");
117  }
118  REQUIRE(m_learning_rate,"Learning Rate instance must set\n");
119 }
virtual SGVector< float64_t > get_gradient()=0
The class implements the stochastic mirror descent (SMD) minimizer.
Definition: SMDMinimizer.h:44
virtual void update_variable(SGVector< float64_t > variable, SGVector< float64_t > dual_variable)=0
This is the base class for L1 penalty/regularization within the FirstOrderMinimizer framework...
Definition: L1Penalty.h:52
#define SG_SWARNING(...)
Definition: SGIO.h:178
FirstOrderCostFunction * m_fun
#define REQUIRE(x,...)
Definition: SGIO.h:206
virtual void init_minimization()
virtual void update_variable(SGVector< float64_t > variable_reference, SGVector< float64_t > negative_descend_direction, float64_t learning_rate)=0
The first order stochastic cost function base class.
virtual float64_t minimize()
index_t vlen
Definition: SGVector.h:494
MappingFunction * m_mapping_fun
Definition: SMDMinimizer.h:100
virtual void init_minimization()
virtual SGVector< float64_t > get_dual_variable(SGVector< float64_t > variable)=0
double float64_t
Definition: common.h:50
virtual float64_t get_penalty(SGVector< float64_t > var)
virtual float64_t get_cost()=0
virtual void update_variable_for_proximity(SGVector< float64_t > variable, float64_t proximal_weight)
Definition: L1Penalty.h:97
The class implements the gradient descent method.
All classes and functions are contained in the shogun namespace.
Definition: class_list.h:18
SGVector< float64_t > m_dual_variable
This is a base class for descent updates with descent-based correction.
virtual float64_t get_learning_rate(int32_t iter_counter)=0
virtual SGVector< float64_t > obtain_variable_reference()=0

SHOGUN Machine Learning Toolbox - Documentation