SHOGUN 4.2.0
FirstOrderStochasticMinimizer.cpp
/*
 * Copyright (c) The Shogun Machine Learning Toolbox
 * Written (w) 2015 Wu Lin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those
 * of the authors and should not be interpreted as representing official policies,
 * either expressed or implied, of the Shogun Development Team.
 *
 */

#include <shogun/optimization/FirstOrderStochasticMinimizer.h>
#include <shogun/optimization/SparsePenalty.h>
#include <shogun/optimization/ProximalPenalty.h>
#include <shogun/base/Parameter.h>
using namespace shogun;

void FirstOrderStochasticMinimizer::set_gradient_updater(DescendUpdater* gradient_updater)
{
    REQUIRE(gradient_updater, "Gradient updater must be set\n");
    if(m_gradient_updater != gradient_updater)
    {
        // Take a reference to the new updater before releasing the old one
        SG_REF(gradient_updater);
        SG_UNREF(m_gradient_updater);
        m_gradient_updater=gradient_updater;
    }
}

FirstOrderStochasticMinimizer::~FirstOrderStochasticMinimizer()
{
    // Release references held on the injected updater and learning rate
    SG_UNREF(m_gradient_updater);
    SG_UNREF(m_learning_rate);
}

void FirstOrderStochasticMinimizer::set_number_passes(int32_t num_passes)
{
    REQUIRE(num_passes>0, "The number of passes (%d) through data must be positive\n", num_passes);
    m_num_passes=num_passes;
}

void FirstOrderStochasticMinimizer::set_learning_rate(LearningRate* learning_rate)
{
    if(m_learning_rate != learning_rate)
    {
        // Take a reference to the new learning rate before releasing the old one
        SG_REF(learning_rate);
        SG_UNREF(m_learning_rate);
        m_learning_rate=learning_rate;
    }
}

void FirstOrderStochasticMinimizer::do_proximal_operation(SGVector<float64_t> variable_reference)
{
    ProximalPenalty* proximal_penalty=dynamic_cast<ProximalPenalty*>(m_penalty_type);
    if(proximal_penalty)
    {
        float64_t proximal_weight=m_penalty_weight;
        SparsePenalty* sparse_penalty=dynamic_cast<SparsePenalty*>(m_penalty_type);
        if(sparse_penalty)
        {
            // For sparse penalties the proximal weight is the penalty weight
            // scaled by the current step size
            REQUIRE(m_learning_rate, "Learning rate must be set when a sparse penalty (e.g., L1) is used\n");
            proximal_weight*=m_learning_rate->get_learning_rate(m_iter_counter);
        }
        proximal_penalty->update_variable_for_proximity(variable_reference, proximal_weight);
    }
}

void FirstOrderStochasticMinimizer::init_minimization()
{
    // Sanity checks before a stochastic run starts
    REQUIRE(m_fun, "Cost function must be set\n");
    REQUIRE(m_gradient_updater, "Descend updater must be set\n");
    REQUIRE(m_num_passes>0, "The number of passes through data must be set\n");
    m_cur_passes=0;
}

void FirstOrderStochasticMinimizer::init()
{
    m_gradient_updater=NULL;
    m_learning_rate=NULL;
    m_num_passes=0;
    m_cur_passes=0;
    m_iter_counter=0;

    // Register members for serialization; MS_NOT_AVAILABLE excludes them
    // from model selection
    SG_ADD((CSGObject **)&m_learning_rate, "FirstOrderMinimizer__m_learning_rate",
        "learning_rate in FirstOrderStochasticMinimizer", MS_NOT_AVAILABLE);
    SG_ADD((CSGObject **)&m_gradient_updater, "FirstOrderMinimizer__m_gradient_updater",
        "gradient_updater in FirstOrderStochasticMinimizer", MS_NOT_AVAILABLE);
    SG_ADD(&m_num_passes, "FirstOrderMinimizer__m_num_passes",
        "num_passes in FirstOrderStochasticMinimizer", MS_NOT_AVAILABLE);
    SG_ADD(&m_cur_passes, "FirstOrderMinimizer__m_cur_passes",
        "cur_passes in FirstOrderStochasticMinimizer", MS_NOT_AVAILABLE);
    SG_ADD(&m_iter_counter, "FirstOrderMinimizer__m_iter_counter",
        "m_iter_counter in FirstOrderStochasticMinimizer", MS_NOT_AVAILABLE);
}
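
A note on do_proximal_operation above: for a composite objective f(w) + lambda*psi(w) with a proximal-friendly penalty psi, a stochastic proximal-gradient step has the form w <- prox_{t*psi}(w - eta_t * grad f(w)), where the proximal threshold t must shrink with the step size eta_t. That is why, for a sparse penalty, the code multiplies m_penalty_weight by get_learning_rate(m_iter_counter) before calling update_variable_for_proximity. As a generic illustration of what an L1-style proximal operator computes (a sketch of the standard soft-thresholding rule, not taken from Shogun's L1Penalty source):

// Illustrative only: element-wise soft-thresholding,
// prox_{t*|.|}(v)[i] = sign(v[i]) * max(|v[i]| - t, 0).
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<double> soft_threshold(const std::vector<double>& v, double t)
{
    std::vector<double> out(v.size());
    for (std::size_t i=0; i<v.size(); ++i)
    {
        // Shrink each coordinate toward zero by t; clamp to zero once it crosses
        const double mag = std::max(std::fabs(v[i]) - t, 0.0);
        out[i] = (v[i] < 0.0 ? -mag : mag);
    }
    return out;
}

Coordinates whose magnitude is below the threshold are set exactly to zero, which is what makes L1-regularized stochastic runs produce sparse solutions.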
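For orientation, here is a minimal sketch of how a concrete subclass of FirstOrderStochasticMinimizer is typically configured before minimizing. The class names SGDMinimizer, GradientDescendUpdater, ConstLearningRate and L1Penalty, and the set_penalty_weight/set_penalty_type setters from the FirstOrderMinimizer base, are assumptions drawn from Shogun's optimization framework rather than from this file; exact constructors and headers may differ in your version.

// A minimal sketch, assuming SGDMinimizer, GradientDescendUpdater,
// ConstLearningRate and L1Penalty as in Shogun's optimization framework;
// `fun` stands in for a problem-specific FirstOrderStochasticCostFunction.
FirstOrderStochasticCostFunction* fun = /* problem-specific cost function */;
SGDMinimizer* opt = new SGDMinimizer(fun);

GradientDescendUpdater* updater = new GradientDescendUpdater();
opt->set_gradient_updater(updater);      // must be non-NULL (see REQUIRE above)

ConstLearningRate* rate = new ConstLearningRate();
rate->set_const_learning_rate(0.01);
opt->set_learning_rate(rate);            // required once a sparse penalty is used

opt->set_number_passes(10);              // must be positive
opt->set_penalty_weight(0.1);
opt->set_penalty_type(new L1Penalty());  // enables the proximal branch above

float64_t cost = opt->minimize();

Note that set_gradient_updater and set_learning_rate SG_REF their arguments, so the minimizer shares ownership of the updater and learning-rate objects and releases them in its destructor.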
