SHOGUN 3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
OnlineLibLinear.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2007-2010 Soeren Sonnenburg
8  * Written (W) 2011 Shashwat Lal Das
9  * Modifications (W) 2013 Thoralf Klein
10  * Copyright (c) 2007-2009 The LIBLINEAR Project.
11  * Copyright (C) 2007-2010 Fraunhofer Institute FIRST and Max-Planck-Society
12  */
13 
17 #include <shogun/lib/Time.h>
18 
19 using namespace shogun;
20 
// Default constructor. NOTE(review): the signature line
// (COnlineLibLinear::COnlineLibLinear() : base ctor) was lost in the
// HTML extraction of this file; only the body survives. All member
// initialisation is delegated to init().
23 {
24  init();
25 }
26 
// Constructor taking a single regularization constant. NOTE(review): the
// signature line (presumably COnlineLibLinear(float64_t C_reg)) was lost
// in the HTML extraction; only the body survives.
28 {
29  init();
 // Same cost constant is applied to both classes; start_train() later
 // copies C1/C2 into Cp/Cn.
30  C1=C_reg;
31  C2=C_reg;
 // This constructor enables the bias term (init() defaults it to false).
32  use_bias=true;
33 }
34 
// Constructor taking the regularization constant and the streaming
// training features. NOTE(review): the opening line of the signature was
// lost in the HTML extraction; only the trailing parameter list and body
// survive below.
36  float64_t C_reg, CStreamingDotFeatures* traindat)
37 {
38  init();
 // One shared cost constant for both classes; bias enabled.
39  C1=C_reg;
40  C2=C_reg;
41  use_bias=true;
42 
 // Hand the streaming feature source to the machine up-front.
43  set_features(traindat);
44 }
45 
// Copy constructor. NOTE(review): the signature line (presumably
// COnlineLibLinear(COnlineLibLinear* mch)) was lost in the HTML
// extraction; only the body survives. Copies hyper-parameters, shares the
// feature source, and deep-copies the learned weight vector.
47 {
48  init();
49  C1 = mch->C1;
50  C2 = mch->C2;
51  use_bias = mch->use_bias;
52 
 // Shares (does not clone) the other machine's feature stream.
53  set_features(mch->features);
54 
 // Deep copy of the weight vector so the two machines can train
 // independently afterwards.
55  w_dim = mch->w_dim;
56  if (w_dim > 0)
57  {
58  w = SG_MALLOC(float32_t, w_dim);
59  memcpy(w, mch->w, w_dim*sizeof(float32_t));
60  }
61  else
62  {
 // No weights learned yet on the source machine.
63  w = NULL;
64  }
65  bias = mch->bias;
66 }
67 
68 
69 void COnlineLibLinear::init()
70 {
71  C1=1;
72  C2=1;
73  Cp=1;
74  Cn=1;
75  use_bias=false;
76 
77  m_parameters->add(&C1, "C1", "C Cost constant 1.");
78  m_parameters->add(&C2, "C2", "C Cost constant 2.");
79  m_parameters->add(&use_bias, "use_bias", "Indicates if bias is used.");
80 
81  PG = 0;
82  PGmax_old = CMath::INFTY;
83  PGmin_old = -CMath::INFTY;
84  PGmax_new = -CMath::INFTY;
85  PGmin_new = CMath::INFTY;
86 
87  diag[0]=0;diag[1]=0;diag[2]=0;
88  upper_bound[0]=Cn;upper_bound[1]=0;upper_bound[2]=Cp;
89 
90  v = 0;
91  nSV = 0;
92 
93  // TODO: "local" variables only used in one method
94  C = 0;
95  d = 0;
96  G = 0;
97  QD = 0;
98  alpha_current = 0;
99 }
100 
// Destructor. NOTE(review): the signature line
// (COnlineLibLinear::~COnlineLibLinear()) was lost in the HTML
// extraction. The body is intentionally empty -- no resources are freed
// here in the visible code.
102 {
103 }
104 
// Prepares the solver state before a training run. NOTE(review): the
// signature line (presumably void COnlineLibLinear::start_train()) was
// lost in the HTML extraction; only the body survives.
106 {
 // Map the user-facing cost constants onto the per-class costs used by
 // the LIBLINEAR dual solver.
107  Cp = C1;
108  Cn = C2;
 // NOTE(review): bias is assigned `false` here, i.e. reset to 0 --
 // presumably bias is a numeric member; confirm this is the intended
 // way to zero it rather than a leftover from a bool field.
109  bias = false;
110 
 // Reset projected-gradient bounds for the new run.
111  PGmax_old = CMath::INFTY;
112  PGmin_old = -CMath::INFTY;
113  PGmax_new = -CMath::INFTY;
114  PGmin_new = CMath::INFTY;
115 
 // Per-label diagonal shift (zero => L1-loss SVM) and alpha caps,
 // indexed by label+1 (slots 0 and 2 used for y=-1 / y=+1).
116  diag[0]=0;diag[1]=0;diag[2]=0;
117  upper_bound[0]=Cn;upper_bound[1]=0;upper_bound[2]=Cp;
118 
 // Reset objective accumulator and support-vector count.
119  v = 0;
120  nSV = 0;
121 }
122 
// Finishes a training run: reports the duality gap, the (halved)
// objective value accumulated in `v`, and the support-vector count.
// NOTE(review): the signature line (presumably
// void COnlineLibLinear::stop_train()) was lost in the HTML extraction.
124 {
 // Duality gap observed over the last pass.
125  float64_t gap = PGmax_new - PGmin_new;
126 
127  SG_DONE()
128  SG_INFO("Optimization finished.\n")
129 
 // calculate objective value: add ||w||^2 (+ bias^2) to the dual terms
 // already accumulated in v by train_one().
130  // calculate objective value
131  for (int32_t i=0; i<w_dim; i++)
132  v += w[i]*w[i];
133  v += bias*bias;
134 
 // v holds twice the objective, hence the division by 2 when printing.
135  SG_INFO("Objective value = %lf\n", v/2)
136  SG_INFO("nSV = %d\n", nSV)
137  SG_INFO("gap = %g\n", gap)
138 }
139 
// One dual coordinate-descent step on a single dense example
// (LIBLINEAR-style online update). NOTE(review): the signature line
// (presumably train_one(SGVector<float32_t> ex, float64_t label)) was
// lost in the HTML extraction.
141 {
 // Fresh dual variable for each streamed example (online setting).
142  alpha_current = 0;
 // Binarize the label to -1/+1.
143  int32_t y_current = 0;
144  if (label > 0)
145  y_current = +1;
146  else
147  y_current = -1;
148 
 // QD = diagonal Hessian entry: per-class shift plus <ex, ex>.
149  QD = diag[y_current + 1];
150  // Dot product of vector with itself
151  QD += SGVector<float32_t>::dot(ex.vector, ex.vector, ex.vlen);
152 
 // NOTE(review): the statement computing G (the dot product of ex with
 // the weight vector w, doxygen line 154) was lost in the HTML
 // extraction -- G is read below, so a line is definitely missing here.
153  // Dot product of vector with learned weights
155 
156  if (use_bias)
157  G += bias;
 // Gradient of the dual objective for this coordinate.
158  G = G*y_current - 1;
159  // LINEAR TERM PART?
160 
 // Box constraint for this class.
161  C = upper_bound[y_current + 1];
162  G += alpha_current*diag[y_current + 1]; // Can be eliminated, since diag = 0 vector
163 
 // Projected gradient: shrinking test against the previous pass's
 // extremes; a zero PG means this coordinate is already optimal.
164  PG = 0;
165  if (alpha_current == 0) // This condition will always be true in the online version
166  {
167  if (G > PGmax_old)
168  {
169  return;
170  }
171  else if (G < 0)
172  PG = G;
173  }
174  else if (alpha_current == C)
175  {
176  if (G < PGmin_old)
177  {
178  return;
179  }
180  else if (G > 0)
181  PG = G;
182  }
183  else
184  PG = G;
185 
 // Track gap bounds for the stop_train() report.
186  PGmax_new = CMath::max(PGmax_new, PG);
187  PGmin_new = CMath::min(PGmin_new, PG);
188 
189  if (fabs(PG) > 1.0e-12)
190  {
 // Newton step on alpha, clipped to [0, C], then fold the change
 // into the primal weights: w += (alpha_new - alpha_old)*y * ex.
191  float64_t alpha_old = alpha_current;
192  alpha_current = CMath::min(CMath::max(alpha_current - G/QD, 0.0), C);
193  d = (alpha_current - alpha_old) * y_current;
194 
195  for (int32_t i=0; i < w_dim; ++i)
196  w[i] += d*ex[i];
197 
198 
199  if (use_bias)
200  bias += d;
201  }
202 
 // Accumulate this example's dual contribution to the objective.
203  v += alpha_current*(alpha_current*diag[y_current + 1] - 2);
204  if (alpha_current > 0)
205  nSV++;
206 }
207 
// One dual coordinate-descent step on a single sparse example; mirrors
// the dense train_one() above but updates only the touched weight
// entries. NOTE(review): the signature line (presumably
// train_one(SGSparseVector<float32_t> ex, float64_t label)) was lost in
// the HTML extraction.
209 {
 // Fresh dual variable per streamed example.
210  alpha_current = 0;
 // Binarize the label to -1/+1.
211  int32_t y_current = 0;
212  if (label > 0)
213  y_current = +1;
214  else
215  y_current = -1;
216 
 // QD = diagonal Hessian entry for this example.
217  QD = diag[y_current + 1];
 // NOTE(review): the statement adding the example's self-dot-product
 // to QD (doxygen line 219) was lost in the HTML extraction -- the
 // comment above it survives, so a line is definitely missing here.
218  // Dot product of vector with itself
220 
 // G = <ex, w> via the sparse/dense dot helper.
221  // Dot product of vector with learned weights
222  G = ex.dense_dot(1.0,w,w_dim,0.0);
223 
224  if (use_bias)
225  G += bias;
 // Gradient of the dual objective for this coordinate.
226  G = G*y_current - 1;
227  // LINEAR TERM PART?
228 
229  C = upper_bound[y_current + 1];
230  G += alpha_current*diag[y_current + 1]; // Can be eliminated, since diag = 0 vector
231 
 // Projected gradient with shrinking, as in the dense variant.
232  PG = 0;
233  if (alpha_current == 0) // This condition will always be true in the online version
234  {
235  if (G > PGmax_old)
236  {
237  return;
238  }
239  else if (G < 0)
240  PG = G;
241  }
242  else if (alpha_current == C)
243  {
244  if (G < PGmin_old)
245  {
246  return;
247  }
248  else if (G > 0)
249  PG = G;
250  }
251  else
252  PG = G;
253 
254  PGmax_new = CMath::max(PGmax_new, PG);
255  PGmin_new = CMath::min(PGmin_new, PG);
256 
257  if (fabs(PG) > 1.0e-12)
258  {
 // Clipped Newton step, then scatter the update over the example's
 // non-zero entries only.
259  float64_t alpha_old = alpha_current;
260  alpha_current = CMath::min(CMath::max(alpha_current - G/QD, 0.0), C);
261  d = (alpha_current - alpha_old) * y_current;
262 
263  for (int32_t i=0; i < ex.num_feat_entries; i++)
264  w[ex.features[i].feat_index] += d*ex.features[i].entry;
265 
266 
267  if (use_bias)
268  bias += d;
269  }
270 
 // Accumulate this example's dual contribution to the objective.
271  v += alpha_current*(alpha_current*diag[y_current + 1] - 2);
272  if (alpha_current > 0)
273  nSV++;
274 }
275 
// Dispatches a streamed example to the dense or sparse train_one()
// overload depending on the runtime feature type. NOTE(review): this
// fragment is heavily mutilated by the HTML extraction -- the signature
// line, the feature-class `if`/`else if` conditions, the local `feat`
// declarations, and the final error branch body were all lost; only the
// branch bodies below survive. Reconstruct against the original source
// before editing.
277 {
279 
 // Dense branch: expects CStreamingDenseFeatures<float32_t>.
282  dynamic_cast<CStreamingDenseFeatures<float32_t> *>(feature);
283  if (feat == NULL)
284  SG_ERROR("Expected streaming dense feature <float32_t>\n")
285 
286  train_one(feat->get_vector(), label);
287  }
 // Sparse branch: expects CStreamingSparseFeatures<float32_t>.
290  dynamic_cast<CStreamingSparseFeatures<float32_t> *>(feature);
291  if (feat == NULL)
292  SG_ERROR("Expected streaming sparse feature <float32_t>\n")
293 
294  train_one(feat->get_vector(), label);
295  }
 // Fallback for unsupported feature classes (body lost in extraction).
296  else {
298  }
299 }

SHOGUN Machine Learning Toolbox - Documentation