SHOGUN 4.2.0
LaRank.cpp
// -*- C++ -*-
// Main functions of the LaRank algorithm for solving Multiclass SVM
// Copyright (C) 2008- Antoine Bordes
// Shogun specific adjustments (w) 2009 Soeren Sonnenburg

// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
/***********************************************************************
 *
 * LUSH Lisp Universal Shell
 * Copyright (C) 2002 Leon Bottou, Yann Le Cun, AT&T Corp, NECI.
 * Includes parts of TL3:
 * Copyright (C) 1987-1999 Leon Bottou and Neuristique.
 * Includes selected parts of SN3.2:
 * Copyright (C) 1991-2001 AT&T Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA
 *
 ***********************************************************************/

/***********************************************************************
 * $Id: kcache.c,v 1.9 2007/01/25 22:42:09 leonb Exp $
 **********************************************************************/

#include <vector>
#include <algorithm>
#include <ctime>
#include <sys/time.h>

#include <shogun/io/SGIO.h>
#include <shogun/lib/Signal.h>
#include <shogun/lib/Time.h>
#include <shogun/mathematics/Math.h>
#include <shogun/multiclass/LaRank.h>
#include <shogun/multiclass/MulticlassOneVsRestStrategy.h>
#include <shogun/kernel/Kernel.h>
#include <shogun/labels/MulticlassLabels.h>

using namespace shogun;

namespace shogun
{
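  // Kernel cache adapted from the LUSH kcache: each row k holds cached kernel
  // values for example k against the examples currently mapped to row
  // positions 0..rsize[k]-1. qnext/qprev thread the rows into an LRU list
  // (rnext/rprev are the same arrays shifted so that index -1 acts as the
  // list sentinel), i2r/r2i store the permutation between example indices and
  // row positions, and caches over the same kernel can be linked into a
  // circular "buddy" ring that answers queries from each other's rows.
  // (Summary inferred from the code below; the struct itself is declared in
  // LaRank.h.)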
  static larank_kcache_t* larank_kcache_create (CKernel* kernelfunc)
  {
    larank_kcache_t *self;
    self = SG_CALLOC (larank_kcache_t, 1);
    self->l = 0;
    self->maxrowlen = 0;
    self->func = kernelfunc;
    self->prevbuddy = self;
    self->nextbuddy = self;
    self->cursize = sizeof (larank_kcache_t);
    self->maxsize = 256 * 1024 * 1024;
    self->qprev = SG_MALLOC(int32_t, 1);
    self->qnext = SG_MALLOC(int32_t, 1);
    self->rnext = self->qnext + 1;
    self->rprev = self->qprev + 1;
    self->rprev[-1] = -1;
    self->rnext[-1] = -1;
    return self;
  }

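  // Shrink row k to nlen kernel values; shrinking a row to zero releases its
  // storage and unlinks it from the LRU list.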
  static void xtruncate (larank_kcache_t * self, int32_t k, int32_t nlen)
  {
    int32_t olen = self->rsize[k];
    if (nlen < olen)
    {
      float32_t *ndata;
      float32_t *odata = self->rdata[k];
      if (nlen > 0)
      {
        ndata = SG_MALLOC(float32_t, nlen);
        memcpy (ndata, odata, nlen * sizeof (float32_t));
      }
      else
      {
        ndata = 0;
        self->rnext[self->rprev[k]] = self->rnext[k];
        self->rprev[self->rnext[k]] = self->rprev[k];
        self->rnext[k] = self->rprev[k] = k;
      }
      SG_FREE (odata);
      self->rdata[k] = ndata;
      self->rsize[k] = nlen;
      self->cursize += (int64_t) (nlen - olen) * sizeof (float32_t);
    }
  }

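  // Evict rows from the least-recently-used end of the list until the cache
  // is back under its byte budget (maxsize).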
  static void xpurge (larank_kcache_t * self)
  {
    if (self->cursize > self->maxsize)
    {
      int32_t k = self->rprev[-1];
      while (self->cursize > self->maxsize && k != self->rnext[-1])
      {
        int32_t pk = self->rprev[k];
        xtruncate (self, k, 0);
        k = pk;
      }
    }
  }

  static void larank_kcache_set_maximum_size (larank_kcache_t * self, int64_t entries)
  {
    ASSERT (self)
    ASSERT (entries > 0)
    self->maxsize = entries;
    xpurge (self);
  }

  static void larank_kcache_destroy (larank_kcache_t * self)
  {
    if (self)
    {
      int32_t i;
      larank_kcache_t *nb = self->nextbuddy;
      larank_kcache_t *pb = self->prevbuddy;
      pb->nextbuddy = nb;
      nb->prevbuddy = pb;
      /* delete */
      if (self->i2r)
        SG_FREE (self->i2r);
      if (self->r2i)
        SG_FREE (self->r2i);
      if (self->rdata)
        for (i = 0; i < self->l; i++)
          if (self->rdata[i])
            SG_FREE (self->rdata[i]);
      if (self->rdata)
        SG_FREE (self->rdata);
      if (self->rsize)
        SG_FREE (self->rsize);
      if (self->rdiag)
        SG_FREE (self->rdiag);
      if (self->qnext)
        SG_FREE (self->qnext);
      if (self->qprev)
        SG_FREE (self->qprev);
      memset (self, 0, sizeof (larank_kcache_t));
      SG_FREE (self);
    }
  }

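  // Grow the bookkeeping arrays so that at least n examples fit; capacity
  // doubles, starting from 256. Fresh rows get rsize = -1 ("never computed").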
  static void xminsize (larank_kcache_t * self, int32_t n)
  {
    int32_t ol = self->l;
    if (n > ol)
    {
      int32_t i;
      int32_t nl = CMath::max (256, ol);
      while (nl < n)
        nl = nl + nl;
      self->i2r = SG_REALLOC (int32_t, self->i2r, self->l, nl);
      self->r2i = SG_REALLOC (int32_t, self->r2i, self->l, nl);
      self->rsize = SG_REALLOC (int32_t, self->rsize, self->l, nl);
      self->qnext = SG_REALLOC (int32_t, self->qnext, 1 + self->l, (1 + nl));
      self->qprev = SG_REALLOC (int32_t, self->qprev, 1 + self->l, (1 + nl));
      self->rdiag = SG_REALLOC (float32_t, self->rdiag, self->l, nl);
      self->rdata = SG_REALLOC (float32_t*, self->rdata, self->l, nl);
      self->rnext = self->qnext + 1;
      self->rprev = self->qprev + 1;
      for (i = ol; i < nl; i++)
      {
        self->i2r[i] = i;
        self->r2i[i] = i;
        self->rsize[i] = -1;
        self->rnext[i] = i;
        self->rprev[i] = i;
        self->rdata[i] = 0;
      }
      self->l = nl;
    }
  }

  static int32_t* larank_kcache_r2i (larank_kcache_t * self, int32_t n)
  {
    xminsize (self, n);
    return self->r2i;
  }

  static void xextend (larank_kcache_t * self, int32_t k, int32_t nlen)
  {
    int32_t olen = self->rsize[k];
    if (nlen > olen)
    {
      float32_t *ndata = SG_MALLOC(float32_t, nlen);
      if (olen > 0)
      {
        float32_t *odata = self->rdata[k];
        memcpy (ndata, odata, olen * sizeof (float32_t));
        SG_FREE (odata);
      }
      self->rdata[k] = ndata;
      self->rsize[k] = nlen;
      self->cursize += (int64_t) (nlen - olen) * sizeof (float32_t);
      self->maxrowlen = CMath::max (self->maxrowlen, nlen);
    }
  }

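  // Swap the examples stored at row positions r1 and r2 (example indices i1
  // and i2), patching every cached row in place so stored values keep
  // matching the updated i2r/r2i permutation; entries that cannot be
  // recovered cheaply are truncated away.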
  static void xswap (larank_kcache_t * self, int32_t i1, int32_t i2, int32_t r1, int32_t r2)
  {
    /* swap row data */
    if (r1 < self->maxrowlen || r2 < self->maxrowlen)
    {
      int32_t mrl = 0;
      int32_t k = self->rnext[-1];
      while (k >= 0)
      {
        int32_t nk = self->rnext[k];
        int32_t n = self->rsize[k];
        int32_t rr = self->i2r[k];
        float32_t *d = self->rdata[k];
        if (r1 < n)
        {
          if (r2 < n)
          {
            float32_t t1 = d[r1];
            float32_t t2 = d[r2];
            d[r1] = t2;
            d[r2] = t1;
          }
          else if (rr == r2)
          {
            d[r1] = self->rdiag[k];
          }
          else
          {
            int32_t arsize = self->rsize[i2];
            if (rr < arsize && rr != r1)
              d[r1] = self->rdata[i2][rr];
            else
              xtruncate (self, k, r1);
          }
        }
        else if (r2 < n)
        {
          if (rr == r1)
          {
            d[r2] = self->rdiag[k];
          }
          else
          {
            int32_t arsize = self->rsize[i1];
            if (rr < arsize && rr != r2)
              d[r2] = self->rdata[i1][rr];
            else
              xtruncate (self, k, r2);
          }
        }
        mrl = CMath::max (mrl, self->rsize[k]);
        k = nk;
      }
      self->maxrowlen = mrl;
    }
    /* swap r2i and i2r */
    self->r2i[r1] = i2;
    self->r2i[r2] = i1;
    self->i2r[i1] = r2;
    self->i2r[i2] = r1;
  }

  static void larank_kcache_swap_rr (larank_kcache_t * self, int32_t r1, int32_t r2)
  {
    xminsize (self, 1 + CMath::max(r1, r2));
    xswap (self, self->r2i[r1], self->r2i[r2], r1, r2);
  }

  static void larank_kcache_swap_ri (larank_kcache_t * self, int32_t r1, int32_t i2)
  {
    xminsize (self, 1 + CMath::max (r1, i2));
    xswap (self, self->r2i[r1], i2, r1, self->i2r[i2]);
  }

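  // Look K(i,j) up in this cache and in every buddy cache; only if no cached
  // row contains it is the kernel actually evaluated.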
  static float64_t xquery (larank_kcache_t * self, int32_t i, int32_t j)
  {
    /* search buddies */
    larank_kcache_t *cache = self->nextbuddy;
    do
    {
      int32_t l = cache->l;
      if (i < l && j < l)
      {
        int32_t s = cache->rsize[i];
        int32_t p = cache->i2r[j];
        if (p < s)
          return cache->rdata[i][p];
        if (i == j && s >= 0)
          return cache->rdiag[i];
        p = cache->i2r[i];
        s = cache->rsize[j];
        if (p < s)
          return cache->rdata[j][p];
      }
      cache = cache->nextbuddy;
    }
    while (cache != self);
    /* compute */
    return self->func->kernel(i, j);
  }

  static float64_t larank_kcache_query (larank_kcache_t * self, int32_t i, int32_t j)
  {
    ASSERT (self)
    ASSERT (i >= 0)
    ASSERT (j >= 0)
    return xquery (self, i, j);
  }

  static void larank_kcache_set_buddy (larank_kcache_t * self, larank_kcache_t * buddy)
  {
    larank_kcache_t *p = self;
    larank_kcache_t *selflast = self->prevbuddy;
    larank_kcache_t *buddylast = buddy->prevbuddy;
    /* check functions are identical */
    ASSERT (self->func == buddy->func)
    /* make sure we are not already buddies */
    do
    {
      if (p == buddy)
        return;
      p = p->nextbuddy;
    }
    while (p != self);
    /* link */
    selflast->nextbuddy = buddy;
    buddy->prevbuddy = selflast;
    buddylast->nextbuddy = self;
    self->prevbuddy = buddylast;
  }

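  // Return a pointer to row i holding at least len entries, computing any
  // missing kernel values on demand and moving the row to the head of the
  // LRU list.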
  static float32_t* larank_kcache_query_row (larank_kcache_t * self, int32_t i, int32_t len)
  {
    ASSERT (i >= 0)
    if (i < self->l && len <= self->rsize[i])
    {
      self->rnext[self->rprev[i]] = self->rnext[i];
      self->rprev[self->rnext[i]] = self->rprev[i];
    }
    else
    {
      int32_t olen, p;
      float32_t *d;
      if (i >= self->l || len >= self->l)
        xminsize (self, CMath::max (1 + i, len));
      olen = self->rsize[i];
      if (olen < len)
      {
        if (olen < 0)
        {
          self->rdiag[i] = self->func->kernel(i, i);
          olen = self->rsize[i] = 0;
        }
        xextend (self, i, len);
        d = self->rdata[i];
        self->rsize[i] = olen;
        for (p = olen; p < len; p++)
          d[p] = larank_kcache_query (self, self->r2i[p], i);
        self->rsize[i] = len;
      }
      self->rnext[self->rprev[i]] = self->rnext[i];
      self->rprev[self->rnext[i]] = self->rprev[i];
      xpurge (self);
    }
    self->rprev[i] = -1;
    self->rnext[i] = self->rnext[-1];
    self->rnext[self->rprev[i]] = i;
    self->rprev[self->rnext[i]] = i;
    return self->rdata[i];
  }

}

// Initializing an output class (basically creating a kernel cache for it)
void LaRankOutput::initialize (CKernel* kfunc, int64_t cache)
{
  kernel = larank_kcache_create (kfunc);
  larank_kcache_set_maximum_size (kernel, cache * 1024 * 1024);
  beta = SG_MALLOC(float32_t, 1);
  g = SG_MALLOC(float32_t, 1);
  *beta = 0;
  *g = 0;
  l = 0;
}

// Destroying an output class (basically destroying the kernel cache)
void LaRankOutput::destroy ()
{
  larank_kcache_destroy (kernel);
  kernel = NULL;
  SG_FREE(beta);
  SG_FREE(g);
  beta = NULL;
  g = NULL;
}

// !Important! Computing the score of a given input vector for the current output
float64_t LaRankOutput::computeScore (int32_t x_id)
{
  if (l == 0)
    return 0;
  else
  {
    float32_t *row = larank_kcache_query_row (kernel, x_id, l);
    return CMath::dot (beta, row, l);
  }
}

// !Important! Computing the gradient of a given input vector for the current output
float64_t LaRankOutput::computeGradient (int32_t xi_id, int32_t yi, int32_t ythis)
{
  return (yi == ythis ? 1 : 0) - computeScore (xi_id);
}

// Updating the solution in the current output
void LaRankOutput::update (int32_t x_id, float64_t lambda, float64_t gp)
{
  int32_t *r2i = larank_kcache_r2i (kernel, l);
  int64_t xr = l + 1;
  for (int32_t r = 0; r < l; r++)
    if (r2i[r] == x_id)
    {
      xr = r;
      break;
    }

  // updates the cache order and the beta coefficient
  if (xr < l)
  {
    beta[xr] += lambda;
  }
  else
  {
    larank_kcache_swap_ri (kernel, l, x_id);
    g = SG_REALLOC(float32_t, g, l, l + 1);
    beta = SG_REALLOC(float32_t, beta, l, l + 1);
    g[l] = gp;
    beta[l] = lambda;
    l++;
  }

  // update stored gradients
  float32_t *row = larank_kcache_query_row (kernel, x_id, l);
  for (int32_t r = 0; r < l; r++)
  {
    float64_t oldg = g[r];
    g[r] = oldg - lambda * row[r];
  }
}
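
// (The update above is one half of an SMO-style step: beta_x changes by
// lambda for this output class, and every stored gradient g_r drops by
// lambda * K(x, x_r); process() below calls update() twice, with +lambda and
// -lambda on the two selected outputs.)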

// Linking the cache of this output to the cache of another "buddy" output
// so that if a requested value is not found in this cache, you can ask your buddy if it has it.
void LaRankOutput::set_kernel_buddy (larank_kcache_t * bud)
{
  larank_kcache_set_buddy (bud, kernel);
}

// Removing useless support vectors (for which beta=0)
int32_t LaRankOutput::cleanup ()
{
  int32_t count = 0;
  std::vector < int32_t > idx;
  for (int32_t x = 0; x < l; x++)
  {
    if ((beta[x] < FLT_EPSILON) && (beta[x] > -FLT_EPSILON))
    {
      idx.push_back (x);
      count++;
    }
  }
  int32_t new_l = l - count;
  for (int32_t xx = 0; xx < count; xx++)
  {
    int32_t i = idx[xx] - xx;
    for (int32_t r = i; r < (l - 1); r++)
    {
      larank_kcache_swap_rr (kernel, r, int64_t(r) + 1);
      beta[r] = beta[r + 1];
      g[r] = g[r + 1];
    }
  }
  beta = SG_REALLOC(float32_t, beta, l, new_l + 1);
  g = SG_REALLOC(float32_t, g, l, new_l + 1);
  beta[new_l] = 0;
  g[new_l] = 0;
  l = new_l;
  return count;
}

// --- Below are information or "get" functions --- //
//
float64_t LaRankOutput::getW2 ()
{
  float64_t sum = 0;
  int32_t *r2i = larank_kcache_r2i (kernel, l + 1);
  for (int32_t r = 0; r < l; r++)
  {
    float32_t *row_r = larank_kcache_query_row (kernel, r2i[r], l);
    sum += beta[r] * CMath::dot (beta, row_r, l);
  }
  return sum;
}

float64_t LaRankOutput::getKii (int32_t x_id)
{
  return larank_kcache_query (kernel, x_id, x_id);
}

//
float64_t LaRankOutput::getBeta (int32_t x_id)
{
  int32_t *r2i = larank_kcache_r2i (kernel, l);
  int32_t xr = -1;
  for (int32_t r = 0; r < l; r++)
    if (r2i[r] == x_id)
    {
      xr = r;
      break;
    }
  return (xr < 0 ? 0 : beta[xr]);
}

//
float64_t LaRankOutput::getGradient (int32_t x_id)
{
  int32_t *r2i = larank_kcache_r2i (kernel, l);
  int32_t xr = -1;
  for (int32_t r = 0; r < l; r++)
    if (r2i[r] == x_id)
    {
      xr = r;
      break;
    }
  return (xr < 0 ? 0 : g[xr]);
}

bool LaRankOutput::isSupportVector (int32_t x_id) const
{
  int32_t *r2i = larank_kcache_r2i (kernel, l);
  int32_t xr = -1;
  for (int32_t r = 0; r < l; r++)
    if (r2i[r] == x_id)
    {
      xr = r;
      break;
    }
  return (xr >= 0);
}

//
int32_t LaRankOutput::getSV (float32_t* &sv) const
{
  sv = SG_MALLOC(float32_t, l);
  int32_t *r2i = larank_kcache_r2i (kernel, l);
  for (int32_t r = 0; r < l; r++)
    sv[r] = r2i[r];
  return l;
}

CLaRank::CLaRank (): CMulticlassSVM(new CMulticlassOneVsRestStrategy()),
  nb_seen_examples (0), nb_removed (0),
  n_pro (0), n_rep (0), n_opt (0),
  w_pro (1), w_rep (1), w_opt (1), y0 (0), m_dual (0),
  batch_mode(true), step(0), max_iteration(1000)
{
}

CLaRank::CLaRank (float64_t C, CKernel* k, CLabels* lab):
  CMulticlassSVM(new CMulticlassOneVsRestStrategy(), C, k, lab),
  nb_seen_examples (0), nb_removed (0),
  n_pro (0), n_rep (0), n_opt (0),
  w_pro (1), w_rep (1), w_opt (1), y0 (0), m_dual (0),
  batch_mode(true), step(0), max_iteration(1000)
{
}

CLaRank::~CLaRank ()
{
  destroy();
}

bool CLaRank::train_machine (CFeatures* data)
{
  tau = 0.0001;

  ASSERT(m_kernel)
  ASSERT(m_labels && m_labels->get_num_labels())
  ASSERT(m_labels->get_label_type() == LT_MULTICLASS)

  CSignal::clear_cancel();

  if (data)
  {
    if (data->get_num_vectors() != m_labels->get_num_labels())
    {
      SG_ERROR("Number of vectors (%d) does not match number of labels (%d)\n",
          data->get_num_vectors(), m_labels->get_num_labels());
    }
    m_kernel->init(data, data);
  }

  ASSERT(m_kernel->get_num_vec_lhs() && m_kernel->get_num_vec_rhs())

  nb_train = m_labels->get_num_labels();
  cache = m_kernel->get_cache_size();

  int32_t n_it = 1;
  float64_t gap = DBL_MAX;

  SG_INFO("Training on %d examples\n", nb_train)
  while (gap > get_C() && (!CSignal::cancel_computations()) &&
      n_it < max_iteration) // stopping criteria
  {
    float64_t tr_err = 0;
    int32_t ind = step;
    for (int32_t i = 0; i < nb_train; i++)
    {
      int32_t y = ((CMulticlassLabels*) m_labels)->get_label(i);
      if (add (i, y) != y) // call the add function
        tr_err++;

      if (ind && i / ind)
      {
        SG_DEBUG("Done: %d %% Train error (online): %f%%\n",
            (int32_t) (((float64_t) i) / nb_train * 100), (tr_err / ((float64_t) i + 1)) * 100);
        ind += step;
      }
    }

    SG_DEBUG("End of iteration %d\n", n_it)
    SG_DEBUG("Train error (online): %f%%\n", (tr_err / nb_train) * 100)
    gap = computeGap ();
    SG_ABS_PROGRESS(gap, -CMath::log10(gap), -CMath::log10(DBL_MAX), -CMath::log10(get_C()), 6)

    if (!batch_mode) // skip stopping criteria if online mode
      gap = 0;
    n_it++;
  }
  SG_DONE()

  if (n_it >= max_iteration && gap > get_C())
  {
    SG_WARNING("LaRank did not converge after %d iterations.\n",
        max_iteration);
  }

  int32_t num_classes = outputs.size();
  create_multiclass_svm(num_classes);
  SG_DEBUG("%d classes\n", num_classes)

  // Used for saving a model file
  int32_t i = 0;
  for (outputhash_t::const_iterator it = outputs.begin (); it != outputs.end (); ++it)
  {
    const LaRankOutput* o = &(it->second);

    larank_kcache_t* k = o->getKernel();
    int32_t l = o->get_l();
    float32_t* beta = o->getBetas();
    int32_t *r2i = larank_kcache_r2i (k, l);

    ASSERT(l > 0)
    SG_DEBUG("svm[%d] has %d sv, b=%f\n", i, l, 0.0)

    CSVM* svm = new CSVM(l);

    for (int32_t j = 0; j < l; j++)
    {
      svm->set_alpha(j, beta[j]);
      svm->set_support_vector(j, r2i[j]);
    }

    svm->set_bias(0);
    set_svm(i, svm);
    i++;
  }
  destroy();

  return true;
}
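
// A minimal usage sketch (not part of this file; assumes the Shogun 4.x API,
// and the feature/label setup is hypothetical):
//
//   CDenseFeatures<float64_t>* feats = new CDenseFeatures<float64_t>(matrix);
//   CMulticlassLabels* labels = new CMulticlassLabels(label_vector);
//   CGaussianKernel* kernel = new CGaussianKernel(feats, feats, 2.0);
//   CLaRank* machine = new CLaRank(1.0, kernel, labels);
//   machine->train();                 // runs train_machine() above
//   CMulticlassLabels* pred = machine->apply_multiclass(feats);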

// LEARNING FUNCTION: add new patterns and run optimization steps selected with adaptive schedule
int32_t CLaRank::add (int32_t x_id, int32_t yi)
{
  ++nb_seen_examples;
  // create a new output object if this one has never been seen before
  if (!getOutput (yi))
  {
    outputs.insert (std::make_pair (yi, LaRankOutput ()));
    LaRankOutput *cur = getOutput (yi);
    cur->initialize (m_kernel, cache);
    if (outputs.size () == 1)
      y0 = outputs.begin ()->first;
    // link the cache of this new output to a buddy
    if (outputs.size () > 1)
    {
      LaRankOutput *out0 = getOutput (y0);
      cur->set_kernel_buddy (out0->getKernel ());
    }
  }

  LaRankPattern tpattern (x_id, yi);
  LaRankPattern & pattern = (patterns.isPattern (x_id)) ? patterns.getPattern (x_id) : tpattern;

  // ProcessNew with the "fresh" pattern
  float64_t time1 = CTime::get_curtime();
  process_return_t pro_ret = process (pattern, processNew);
  float64_t dual_increase = pro_ret.dual_increase;
  float64_t duration = (CTime::get_curtime() - time1);
  float64_t coeff = dual_increase / (0.00001 + duration);
  m_dual += dual_increase;
  n_pro++;
  w_pro = 0.05 * coeff + (1 - 0.05) * w_pro;

  // ProcessOld & Optimize until ready for a new processnew
  // (adaptive schedule here)
  for (;;)
  {
    float64_t w_sum = w_pro + w_rep + w_opt;
    float64_t prop_min = w_sum / 20;
    if (w_pro < prop_min)
      w_pro = prop_min;
    if (w_rep < prop_min)
      w_rep = prop_min;
    if (w_opt < prop_min)
      w_opt = prop_min;
    w_sum = w_pro + w_rep + w_opt;
    float64_t r = CMath::random(0.0, w_sum);
    if (r <= w_pro)
    {
      break;
    }
    else if ((r > w_pro) && (r <= w_pro + w_rep)) // ProcessOld here
    {
      float64_t ltime1 = CTime::get_curtime ();
      float64_t ldual_increase = reprocess ();
      float64_t lduration = (CTime::get_curtime () - ltime1);
      float64_t lcoeff = ldual_increase / (0.00001 + lduration);
      m_dual += ldual_increase;
      n_rep++;
      w_rep = 0.05 * lcoeff + (1 - 0.05) * w_rep;
    }
    else // Optimize here
    {
      float64_t ltime1 = CTime::get_curtime ();
      float64_t ldual_increase = optimize ();
      float64_t lduration = (CTime::get_curtime () - ltime1);
      float64_t lcoeff = ldual_increase / (0.00001 + lduration);
      m_dual += ldual_increase;
      n_opt++;
      w_opt = 0.05 * lcoeff + (1 - 0.05) * w_opt;
    }
  }
  if (nb_seen_examples % 100 == 0) // Cleanup useless Support Vectors/Patterns sometimes
    nb_removed += cleanup ();
  return pro_ret.ypred;
}
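
// (w_pro, w_rep and w_opt above track an exponentially smoothed estimate of
// the dual increase per second achieved by ProcessNew, ProcessOld and
// Optimize respectively; each weight is floored at 5% of their sum so that
// no step type ever starves.)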

// PREDICTION FUNCTION: main function in la_rank_classify
int32_t CLaRank::predict (int32_t x_id)
{
  int32_t res = -1;
  float64_t score_max = -DBL_MAX;
  for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
  {
    float64_t score = it->second.computeScore (x_id);
    if (score > score_max)
    {
      score_max = score;
      res = it->first;
    }
  }
  return res;
}

void CLaRank::destroy ()
{
  for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
    it->second.destroy ();
  outputs.clear();
}

// Compute duality gap (costly but used in stopping criteria in batch mode)
float64_t CLaRank::computeGap ()
{
  float64_t sum_sl = 0;
  float64_t sum_bi = 0;
  for (uint32_t i = 0; i < patterns.maxcount (); ++i)
  {
    const LaRankPattern & p = patterns[i];
    if (!p.exists ())
      continue;
    LaRankOutput *out = getOutput (p.y);
    if (!out)
      continue;
    sum_bi += out->getBeta (p.x_id);
    float64_t gi = out->computeGradient (p.x_id, p.y, p.y);
    float64_t gmin = DBL_MAX;
    for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
    {
      if (it->first != p.y && it->second.isSupportVector (p.x_id))
      {
        float64_t g =
          it->second.computeGradient (p.x_id, p.y, it->first);
        if (g < gmin)
          gmin = g;
      }
    }
    sum_sl += CMath::max (0.0, gi - gmin);
  }
  return CMath::max (0.0, computeW2 () + get_C() * sum_sl - sum_bi);
}
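
// (In symbols, with beta_i the coefficient of pattern i for its own class y_i
// and g_i(y) the gradients computed above, the value returned is
//   gap = max(0, W2 + C * sum_i max(0, g_i(y_i) - min_{y != y_i, SV} g_i(y)) - sum_i beta_i);
// batch training stops once gap <= C.)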

// Number of classes so far
uint32_t CLaRank::getNumOutputs () const
{
  return outputs.size ();
}

// Set max number of iterations before training is stopped
void CLaRank::set_max_iteration(int32_t max_iter)
{
  REQUIRE(max_iter > 0,
      "Max iteration (given: %d) must be positive.\n",
      max_iter);
  max_iteration = max_iter;
}

// Number of Support Vectors
int32_t CLaRank::getNSV ()
{
  int32_t res = 0;
  for (outputhash_t::const_iterator it = outputs.begin (); it != outputs.end (); ++it)
  {
    float32_t* sv = NULL;
    res += it->second.getSV (sv);
    SG_FREE(sv);
  }
  return res;
}

// Norm of the parameters vector
float64_t CLaRank::computeW2 ()
{
  float64_t res = 0;
  for (uint32_t i = 0; i < patterns.maxcount (); ++i)
  {
    const LaRankPattern & p = patterns[i];
    if (!p.exists ())
      continue;
    for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
      if (it->second.getBeta (p.x_id))
        res += it->second.getBeta (p.x_id) * it->second.computeScore (p.x_id);
  }
  return res;
}

// Compute dual objective value
float64_t CLaRank::getDual ()
{
  float64_t res = 0;
  for (uint32_t i = 0; i < patterns.maxcount (); ++i)
  {
    const LaRankPattern & p = patterns[i];
    if (!p.exists ())
      continue;
    LaRankOutput *out = getOutput (p.y);
    if (!out)
      continue;
    res += out->getBeta (p.x_id);
  }
  return res - computeW2 () / 2;
}

LaRankOutput *CLaRank::getOutput (int32_t index)
{
  outputhash_t::iterator it = outputs.find (index);
  return it == outputs.end () ? NULL : &it->second;
}

// IMPORTANT Main SMO optimization step
CLaRank::process_return_t CLaRank::process (const LaRankPattern & pattern, process_type ptype)
{
  process_return_t pro_ret = process_return_t (0, 0);

  /*
   ** compute gradient and sort
   */
  std::vector < outputgradient_t > outputgradients;
  outputgradients.reserve (getNumOutputs ());
  std::vector < outputgradient_t > outputscores;
  outputscores.reserve (getNumOutputs ());

  for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
  {
    if (ptype != processOptimize
        || it->second.isSupportVector (pattern.x_id))
    {
      float64_t g =
        it->second.computeGradient (pattern.x_id, pattern.y, it->first);
      outputgradients.push_back (outputgradient_t (it->first, g));
      if (it->first == pattern.y)
        outputscores.push_back (outputgradient_t (it->first, (1 - g)));
      else
        outputscores.push_back (outputgradient_t (it->first, -g));
    }
  }

  std::sort (outputgradients.begin (), outputgradients.end ());

  /*
   ** determine the prediction
   */
  std::sort (outputscores.begin (), outputscores.end ());
  pro_ret.ypred = outputscores[0].output;

  /*
   ** Find yp (1st part of the pair)
   */
  outputgradient_t ygp;
  LaRankOutput *outp = NULL;
  uint32_t p;
  for (p = 0; p < outputgradients.size (); ++p)
  {
    outputgradient_t & current = outputgradients[p];
    LaRankOutput *output = getOutput (current.output);
    bool support = ptype == processOptimize || output->isSupportVector (pattern.x_id);
    bool goodclass = current.output == pattern.y;
    if ((!support && goodclass) ||
        (support && output->getBeta (pattern.x_id) < (goodclass ? get_C() : 0)))
    {
      ygp = current;
      outp = output;
      break;
    }
  }
  if (p == outputgradients.size ())
    return pro_ret;

  /*
   ** Find ym (2nd part of the pair)
   */
  outputgradient_t ygm;
  LaRankOutput *outm = NULL;
  int32_t m;
  for (m = outputgradients.size () - 1; m >= 0; --m)
  {
    outputgradient_t & current = outputgradients[m];
    LaRankOutput *output = getOutput (current.output);
    bool support = ptype == processOptimize || output->isSupportVector (pattern.x_id);
    bool goodclass = current.output == pattern.y;
    if (!goodclass || (support && output->getBeta (pattern.x_id) > 0))
    {
      ygm = current;
      outm = output;
      break;
    }
  }
  if (m < 0)
    return pro_ret;

  /*
   ** Throw or Insert pattern
   */
  if ((ygp.gradient - ygm.gradient) < tau)
    return pro_ret;
  if (ptype == processNew)
    patterns.insert (pattern);

  /*
   ** compute lambda and clip it
   */
  float64_t kii = outp->getKii (pattern.x_id);
  float64_t lambda = (ygp.gradient - ygm.gradient) / (2 * kii);
  if (ptype == processOptimize || outp->isSupportVector (pattern.x_id))
  {
    float64_t beta = outp->getBeta (pattern.x_id);
    if (ygp.output == pattern.y)
      lambda = CMath::min (lambda, get_C() - beta);
    else
      lambda = CMath::min (lambda, fabs (beta));
  }
  else
    lambda = CMath::min (lambda, get_C());

  /*
   ** update the solution
   */
  outp->update (pattern.x_id, lambda, ygp.gradient);
  outm->update (pattern.x_id, -lambda, ygm.gradient);

  pro_ret.dual_increase = lambda * ((ygp.gradient - ygm.gradient) - lambda * kii);
  return pro_ret;
}

// ProcessOld
float64_t CLaRank::reprocess ()
{
  if (patterns.size ())
  {
    for (int32_t n = 0; n < 10; ++n)
    {
      process_return_t pro_ret = process (patterns.sample (), processOld);
      if (pro_ret.dual_increase)
        return pro_ret.dual_increase;
    }
  }
  return 0;
}

// Optimize
float64_t CLaRank::optimize ()
{
  float64_t dual_increase = 0;
  if (patterns.size ())
  {
    for (int32_t n = 0; n < 10; ++n)
    {
      process_return_t pro_ret =
        process (patterns.sample(), processOptimize);
      dual_increase += pro_ret.dual_increase;
    }
  }
  return dual_increase;
}

// remove patterns and return the number of patterns that were removed
uint32_t CLaRank::cleanup ()
{
  /*
  for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
    it->second.cleanup ();

  uint32_t res = 0;
  for (uint32_t i = 0; i < patterns.size (); ++i)
  {
    LaRankPattern & p = patterns[i];
    if (p.exists () && !outputs[p.y].isSupportVector (p.x_id))
    {
      patterns.remove (i);
      ++res;
    }
  }
  return res;
  */
  return 0;
}