SGDQN.cpp
/*
   SVM with Quasi-Newton stochastic gradient
   Copyright (C) 2009- Antoine Bordes

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA

   Shogun adjustments (w) 2011 Siddharth Kherada
*/

#include <shogun/classifier/svm/SGDQN.h>
#include <shogun/base/Parameter.h>
#include <shogun/lib/Signal.h>
#include <shogun/mathematics/Math.h>
#include <shogun/loss/HingeLoss.h>
#include <shogun/labels/BinaryLabels.h>

using namespace shogun;

CSGDQN::CSGDQN()
: CLinearMachine()
{
    init();
}

CSGDQN::CSGDQN(float64_t C)
: CLinearMachine()
{
    init();

    C1=C;
    C2=C;
}

CSGDQN::CSGDQN(float64_t C, CDotFeatures* traindat, CLabels* trainlab)
: CLinearMachine()
{
    init();
    C1=C;
    C2=C;

    set_features(traindat);
    set_labels(trainlab);
}

CSGDQN::~CSGDQN()
{
    SG_UNREF(loss);
}

void CSGDQN::set_loss_function(CLossFunction* loss_func)
{
    if (loss)
        SG_UNREF(loss);
    loss=loss_func;
    SG_REF(loss);
}

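// Reading note: compute_ratio() below accumulates, per coordinate i, a secant
// estimate of the inverse curvature, diffw_i / diffgrad_i, where the gradient
// difference of the regularized objective is lambda*diffw_i + diffloss*y*x_i.
// This diagonal estimate B is what turns the plain SGD step into the
// quasi-Newton ("QN") step of SGD-QN.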
void CSGDQN::compute_ratio(float64_t* W,float64_t* W_1,float64_t* B,float64_t* dst,int32_t dim,float64_t lambda,float64_t loss_val)
{
    for (int32_t i=0; i<dim; i++)
    {
        float64_t diffw=W_1[i]-W[i];
        if (diffw)
            B[i]+=diffw/(lambda*diffw + loss_val*dst[i]);
        else
            B[i]+=1/lambda;
    }
}

void CSGDQN::combine_and_clip(float64_t* Bc,float64_t* B,int32_t dim,float64_t c1,float64_t c2,float64_t v1,float64_t v2)
{
    for (int32_t i=0; i<dim; i++)
    {
        if (B[i])
        {
            Bc[i] = Bc[i] * c1 + B[i] * c2;
            Bc[i] = CMath::min(CMath::max(Bc[i],v1),v2);
        }
    }
}

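// Reading note on train(): the inner loop performs one SGD-QN step per example.
// Every `skip` examples the accumulated weight-decay (regularization) step is
// applied in one batch and a flag is set so that, on the next loss-incurring
// example, the diagonal scaling Bc is refreshed via compute_ratio() and
// combine_and_clip(). combine_and_clip() blends the running scaling with the
// fresh estimate and clips it to [1/(100*lambda), 100/lambda] so that step
// sizes stay bounded.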
bool CSGDQN::train(CFeatures* data)
{

    ASSERT(m_labels);
    ASSERT(m_labels->get_label_type() == LT_BINARY);

    if (data)
    {
        if (!data->has_property(FP_DOT))
            SG_ERROR("Specified features are not of type CDotFeatures\n");
        set_features((CDotFeatures*) data);
    }

    ASSERT(features);

    int32_t num_train_labels=m_labels->get_num_labels();
    int32_t num_vec=features->get_num_vectors();

    ASSERT(num_vec==num_train_labels);
    ASSERT(num_vec>0);

    w=SGVector<float64_t>(features->get_dim_feature_space());
    w.zero();

    float64_t lambda=1.0/(C1*num_vec);

    // Shift t in order to have a
    // reasonable initial learning rate.
    // This assumes |x| \approx 1.
    float64_t maxw = 1.0 / sqrt(lambda);
    float64_t typw = sqrt(maxw);
    float64_t eta0 = typw / CMath::max(1.0,-loss->first_derivative(-typw,1));
    t = 1 / (eta0 * lambda);

    SG_INFO("lambda=%f, epochs=%d, eta0=%f\n", lambda, epochs, eta0);

    float64_t* Bc=SG_MALLOC(float64_t, w.vlen);
    SGVector<float64_t>::fill_vector(Bc, w.vlen, 1/lambda);

    float64_t* result=SG_MALLOC(float64_t, w.vlen);
    float64_t* B=SG_MALLOC(float64_t, w.vlen);

    //Calibrate
    calibrate();

    SG_INFO("Training on %d vectors\n", num_vec);
    CSignal::clear_cancel();

    ELossType loss_type = loss->get_loss_type();
    bool is_log_loss = false;
    if ((loss_type == L_LOGLOSS) || (loss_type == L_LOGLOSSMARGIN))
        is_log_loss = true;

    for (int32_t e=0; e<epochs && (!CSignal::cancel_computations()); e++)
    {
        count = skip;
        bool updateB=false;
        for (int32_t i=0; i<num_vec; i++)
        {
            SGVector<float64_t> v = features->get_computed_dot_feature_vector(i);
            ASSERT(w.vlen==v.vlen);
            float64_t eta = 1.0/t;
            float64_t y = ((CBinaryLabels*) m_labels)->get_label(i);
            float64_t z = y * features->dense_dot(i, w.vector, w.vlen);
            if (updateB==true)
            {
                if (z < 1 || is_log_loss)
                {
                    SGVector<float64_t> w_1=w.clone();
                    float64_t loss_1=-loss->first_derivative(z,1);
                    SGVector<float64_t>::vector_multiply(result,Bc,v.vector,w.vlen);
                    SGVector<float64_t>::add(w.vector,eta*loss_1*y,result,1.0,w.vector,w.vlen);
                    float64_t z2 = y * features->dense_dot(i, w.vector, w.vlen);
                    float64_t diffloss = -loss->first_derivative(z2,1) - loss_1;
                    if (diffloss)
                    {
                        compute_ratio(w.vector,w_1.vector,B,v.vector,w.vlen,lambda,y*diffloss);
                        if (t>skip)
                            combine_and_clip(Bc,B,w.vlen,(t-skip)/(t+skip),2*skip/(t+skip),1/(100*lambda),100/lambda);
                        else
                            combine_and_clip(Bc,B,w.vlen,t/(t+skip),skip/(t+skip),1/(100*lambda),100/lambda);
                    }
                }
                updateB=false;
            }
            else
            {
                if (--count<=0)
                {
                    SGVector<float64_t>::vector_multiply(result,Bc,w.vector,w.vlen);
                    SGVector<float64_t>::add(w.vector,-skip*lambda*eta,result,1.0,w.vector,w.vlen);
                    count = skip;
                    updateB=true;
                }

                if (z < 1 || is_log_loss)
                {
                    SGVector<float64_t>::vector_multiply(result,Bc,v.vector,w.vlen);
                    SGVector<float64_t>::add(w.vector,eta*-loss->first_derivative(z,1)*y,result,1.0,w.vector,w.vlen);
                }
            }
            t++;
        }
    }
    SG_FREE(result);
    SG_FREE(B);
    SG_FREE(Bc);

    return true;
}


void CSGDQN::calibrate()
{
    ASSERT(features);
    int32_t num_vec=features->get_num_vectors();
    int32_t c_dim=features->get_dim_feature_space();

    ASSERT(num_vec>0);
    ASSERT(c_dim>0);

    SG_INFO("Estimating sparsity num_vec=%d num_feat=%d.\n", num_vec, c_dim);

    int32_t n = 0;
    float64_t r = 0;

    for (int32_t j=0; j<num_vec; j++, n++)
        r += features->get_nnz_features_for_vector(j);

    // compute weight decay skip
    skip = (int32_t) ((16 * n * c_dim) / r);
}

void CSGDQN::init()
{
    t=0;
    C1=1;
    C2=1;
    epochs=5;
    skip=1000;
    count=1000;

    loss=new CHingeLoss();
    SG_REF(loss);

    m_parameters->add(&C1, "C1", "Cost constant 1.");
    m_parameters->add(&C2, "C2", "Cost constant 2.");
    m_parameters->add(&epochs, "epochs", "epochs");
    m_parameters->add(&skip, "skip", "skip");
    m_parameters->add(&count, "count", "count");
}
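
For context, a minimal sketch of how this class might be used from Shogun's C++ API: the feature values, label values and the constant C below are made up for illustration, and the exact header locations and the availability of set_epochs() are assumptions based on the 2.0.x API rather than anything stated in this file.

#include <cstring>

#include <shogun/base/init.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/classifier/svm/SGDQN.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // toy data: 4 two-dimensional examples, stored column-major (dim x num_vec)
    float64_t feat_data[8]={-1,-1, -2,-1, 1,1, 2,1};
    float64_t lab_data[4]={-1, -1, 1, 1};

    SGMatrix<float64_t> feat(2, 4);
    memcpy(feat.matrix, feat_data, sizeof(feat_data));
    SGVector<float64_t> lab(4);
    memcpy(lab.vector, lab_data, sizeof(lab_data));

    CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t>(feat);
    CBinaryLabels* labels=new CBinaryLabels(lab);

    // C=1.0 is arbitrary; train() runs `epochs` SGD-QN passes over the data
    CSGDQN* svm=new CSGDQN(1.0, features, labels);
    svm->set_epochs(5);
    svm->train();

    SG_UNREF(svm);
    exit_shogun();
    return 0;
}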
