OnlineLibLinear.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2007-2010 Soeren Sonnenburg
 * Written (W) 2011 Shashwat Lal Das
 * Modifications (W) 2013 Thoralf Klein
 * Copyright (c) 2007-2009 The LIBLINEAR Project.
 * Copyright (C) 2007-2010 Fraunhofer Institute FIRST and Max-Planck-Society
 */

#include <shogun/classifier/svm/OnlineLibLinear.h>
#include <shogun/features/streaming/StreamingDenseFeatures.h>
#include <shogun/features/streaming/StreamingSparseFeatures.h>
#include <shogun/lib/Time.h>

using namespace shogun;

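// COnlineLibLinear is a purely online version of CLibLinear that uses the
// L2R_L1LOSS_SVC_DUAL solver only: every streamed example triggers a single
// dual coordinate descent step (see train_one() below) and is then discarded.
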
COnlineLibLinear::COnlineLibLinear()
{
    init();
}

COnlineLibLinear::COnlineLibLinear(float64_t C_reg)
{
    init();
    C1 = C_reg;
    C2 = C_reg;
    use_bias = true;
}

COnlineLibLinear::COnlineLibLinear(
        float64_t C_reg, CStreamingDotFeatures* traindat)
{
    init();
    C1 = C_reg;
    C2 = C_reg;
    use_bias = true;

    set_features(traindat);
}

COnlineLibLinear::COnlineLibLinear(COnlineLibLinear* mch)
{
    init();
    C1 = mch->C1;
    C2 = mch->C2;
    use_bias = mch->use_bias;

    set_features(mch->features);

    w_dim = mch->w_dim;
    if (w_dim > 0)
    {
        w = SG_MALLOC(float32_t, w_dim);
        memcpy(w, mch->w, w_dim*sizeof(float32_t));
    }
    else
    {
        w = NULL;
    }
    bias = mch->bias;
}


void COnlineLibLinear::init()
{
    C1 = 1;
    C2 = 1;
    Cp = 1;
    Cn = 1;
    use_bias = false;

    m_parameters->add(&C1, "C1", "C Cost constant 1.");
    m_parameters->add(&C2, "C2", "C Cost constant 2.");
    m_parameters->add(&use_bias, "use_bias", "Indicates if bias is used.");

    PG = 0;
    PGmax_old = CMath::INFTY;
    PGmin_old = -CMath::INFTY;
    PGmax_new = -CMath::INFTY;
    PGmin_new = CMath::INFTY;

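    // diag and upper_bound are indexed by y+1, so slots 0 and 2 belong to the
    // negative and positive class. For the L1-loss dual, diag stays all-zero
    // and upper_bound supplies the per-class box constraint (Cn and Cp).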
    diag[0] = 0; diag[1] = 0; diag[2] = 0;
    upper_bound[0] = Cn; upper_bound[1] = 0; upper_bound[2] = Cp;

    v = 0;
    nSV = 0;

    // TODO: "local" variables only used in one method
    C = 0;
    d = 0;
    G = 0;
    QD = 0;
    alpha_current = 0;
}

COnlineLibLinear::~COnlineLibLinear()
{
}

void COnlineLibLinear::start_train()
{
    Cp = C1;
    Cn = C2;
    bias = false; // reset the bias term to zero

    PGmax_old = CMath::INFTY;
    PGmin_old = -CMath::INFTY;
    PGmax_new = -CMath::INFTY;
    PGmin_new = CMath::INFTY;

    diag[0] = 0; diag[1] = 0; diag[2] = 0;
    upper_bound[0] = Cn; upper_bound[1] = 0; upper_bound[2] = Cp;

    v = 0;
    nSV = 0;
}

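// PGmax_new/PGmin_new record the extreme projected gradients observed during
// training; their difference is the optimality gap reported below.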
void COnlineLibLinear::stop_train()
{
    float64_t gap = PGmax_new - PGmin_new;

    SG_DONE()
    SG_INFO("Optimization finished.\n")

    // calculate objective value
    for (int32_t i = 0; i < w_dim; i++)
        v += w[i]*w[i];
    v += bias*bias;

    SG_INFO("Objective value = %lf\n", v/2)
    SG_INFO("nSV = %d\n", nSV)
    SG_INFO("gap = %g\n", gap)
}

void COnlineLibLinear::train_one(SGVector<float32_t> ex, float64_t label)
{
    alpha_current = 0;
    int32_t y_current = 0;
    if (label > 0)
        y_current = +1;
    else
        y_current = -1;

    QD = diag[y_current + 1];
    // Dot product of vector with itself
    QD += CMath::dot(ex.vector, ex.vector, ex.vlen);

    // Dot product of vector with learned weights
    G = CMath::dot(ex.vector, w, w_dim);

    if (use_bias)
        G += bias;
    G = G*y_current - 1;
    // LINEAR TERM PART?

    C = upper_bound[y_current + 1];
    G += alpha_current*diag[y_current + 1]; // Can be eliminated, since diag = 0 vector

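    // Project the gradient onto the box constraint 0 <= alpha <= C: at the
    // lower bound only a negative gradient counts as progress, at the upper
    // bound only a positive one; in the interior the raw gradient is kept.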
    PG = 0;
    if (alpha_current == 0) // This condition will always be true in the online version
    {
        if (G > PGmax_old)
        {
            return;
        }
        else if (G < 0)
            PG = G;
    }
    else if (alpha_current == C)
    {
        if (G < PGmin_old)
        {
            return;
        }
        else if (G > 0)
            PG = G;
    }
    else
        PG = G;

    PGmax_new = CMath::max(PGmax_new, PG);
    PGmin_new = CMath::min(PGmin_new, PG);

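    // One clipped coordinate descent step on the dual variable,
    //   alpha <- min(max(alpha - G/QD, 0), C),
    // followed by the matching primal update w <- w + (alpha - alpha_old)*y*x.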
    if (fabs(PG) > 1.0e-12)
    {
        float64_t alpha_old = alpha_current;
        alpha_current = CMath::min(CMath::max(alpha_current - G/QD, 0.0), C);
        d = (alpha_current - alpha_old) * y_current;

        for (int32_t i = 0; i < w_dim; ++i)
            w[i] += d*ex[i];

        if (use_bias)
            bias += d;
    }

    v += alpha_current*(alpha_current*diag[y_current + 1] - 2);
    if (alpha_current > 0)
        nSV++;
}

void COnlineLibLinear::train_one(SGSparseVector<float32_t> ex, float64_t label)
{
    alpha_current = 0;
    int32_t y_current = 0;
    if (label > 0)
        y_current = +1;
    else
        y_current = -1;

    QD = diag[y_current + 1];
    // Dot product of vector with itself
    QD += ex.sparse_dot(ex);

    // Dot product of vector with learned weights
    G = ex.dense_dot(1.0, w, w_dim, 0.0);

    if (use_bias)
        G += bias;
    G = G*y_current - 1;
    // LINEAR TERM PART?

    C = upper_bound[y_current + 1];
    G += alpha_current*diag[y_current + 1]; // Can be eliminated, since diag = 0 vector

    PG = 0;
    if (alpha_current == 0) // This condition will always be true in the online version
    {
        if (G > PGmax_old)
        {
            return;
        }
        else if (G < 0)
            PG = G;
    }
    else if (alpha_current == C)
    {
        if (G < PGmin_old)
        {
            return;
        }
        else if (G > 0)
            PG = G;
    }
    else
        PG = G;

    PGmax_new = CMath::max(PGmax_new, PG);
    PGmin_new = CMath::min(PGmin_new, PG);

    if (fabs(PG) > 1.0e-12)
    {
        float64_t alpha_old = alpha_current;
        alpha_current = CMath::min(CMath::max(alpha_current - G/QD, 0.0), C);
        d = (alpha_current - alpha_old) * y_current;

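        // Sparse primal update: only the nonzero coordinates of the example
        // touch w. train_example() has already grown w via expand_if_required()
        // to cover the largest feature index seen so far.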
        for (int32_t i = 0; i < ex.num_feat_entries; i++)
            w[ex.features[i].feat_index] += d*ex.features[i].entry;

        if (use_bias)
            bias += d;
    }

    v += alpha_current*(alpha_current*diag[y_current + 1] - 2);
    if (alpha_current > 0)
        nSV++;
}

void COnlineLibLinear::train_example(CStreamingDotFeatures* feature, float64_t label)
{
    feature->expand_if_required(w, w_dim);

    if (feature->get_feature_class() == C_STREAMING_DENSE) {
        CStreamingDenseFeatures<float32_t>* feat =
            dynamic_cast<CStreamingDenseFeatures<float32_t> *>(feature);
        if (feat == NULL)
            SG_ERROR("Expected streaming dense feature <float32_t>\n")

        train_one(feat->get_vector(), label);
    }
    else if (feature->get_feature_class() == C_STREAMING_SPARSE) {
        CStreamingSparseFeatures<float32_t>* feat =
            dynamic_cast<CStreamingSparseFeatures<float32_t> *>(feature);
        if (feat == NULL)
            SG_ERROR("Expected streaming sparse feature <float32_t>\n")

        train_one(feat->get_vector(), label);
    }
    else {
        SG_NOTIMPLEMENTED
    }
}
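
As a rough usage sketch (not part of this file), the solver can be driven end
to end as below; the training-file path and ring-buffer size are placeholders.

#include <shogun/base/init.h>
#include <shogun/io/streaming/StreamingAsciiFile.h>
#include <shogun/features/streaming/StreamingDenseFeatures.h>
#include <shogun/classifier/svm/OnlineLibLinear.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // "train.light" is a placeholder path to a labelled SVMLight-format file
    CStreamingAsciiFile* file = new CStreamingAsciiFile("train.light");
    SG_REF(file);

    // true: the stream carries labels; 1024: parser ring-buffer size
    CStreamingDenseFeatures<float32_t>* features =
        new CStreamingDenseFeatures<float32_t>(file, true, 1024);
    SG_REF(features);

    // C = 1.0 regularization constant, streaming features attached
    COnlineLibLinear* svm = new COnlineLibLinear(1.0, features);
    SG_REF(svm);
    svm->train(); // start_train(), one train_example() per vector, stop_train()

    SG_UNREF(svm);
    SG_UNREF(features);
    SG_UNREF(file);
    exit_shogun();
    return 0;
}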