SHOGUN  4.1.0
 全部  命名空间 文件 函数 变量 类型定义 枚举 枚举值 友元 宏定义  
LaRank.cpp
浏览该文件的文档.
1 // -*- C++ -*-
2 // Main functions of the LaRank algorithm for solving Multiclass SVM
3 // Copyright (C) 2008- Antoine Bordes
4 // Shogun specific adjustments (w) 2009 Soeren Sonnenburg
5 
6 // This library is free software; you can redistribute it and/or
7 // modify it under the terms of the GNU Lesser General Public
8 // License as published by the Free Software Foundation; either
9 // version 2.1 of the License, or (at your option) any later version.
10 //
11 // This program is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 //
16 // You should have received a copy of the GNU Lesser General Public
17 // License along with this library; if not, write to the Free Software
18 // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 //
20 /***********************************************************************
21  *
22  * LUSH Lisp Universal Shell
23  * Copyright (C) 2002 Leon Bottou, Yann Le Cun, AT&T Corp, NECI.
24  * Includes parts of TL3:
25  * Copyright (C) 1987-1999 Leon Bottou and Neuristique.
26  * Includes selected parts of SN3.2:
27  * Copyright (C) 1991-2001 AT&T Corp.
28  *
29  * This program is free software; you can redistribute it and/or modify
30  * it under the terms of the GNU General Public License as published by
31  * the Free Software Foundation; either version 2 of the License, or
32  * (at your option) any later version.
33  *
34  * This program is distributed in the hope that it will be useful,
35  * but WITHOUT ANY WARRANTY; without even the implied warranty of
36  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
37  * GNU General Public License for more details.
38  *
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA
42  *
43  ***********************************************************************/
44 
45 /***********************************************************************
46  * $Id: kcache.c,v 1.9 2007/01/25 22:42:09 leonb Exp $
47  **********************************************************************/
48 
49 #include <vector>
50 #include <algorithm>
51 #include <ctime>
52 #include <algorithm>
53 #include <sys/time.h>
54 
55 #include <shogun/io/SGIO.h>
56 #include <shogun/lib/Signal.h>
57 #include <shogun/lib/Time.h>
61 #include <shogun/kernel/Kernel.h>
63 
64 using namespace shogun;
65 
66 namespace shogun
67 {
68  static larank_kcache_t* larank_kcache_create (CKernel* kernelfunc)
69  {
70  larank_kcache_t *self;
71  self = SG_CALLOC (larank_kcache_t, 1);
72  self->l = 0;
73  self->maxrowlen = 0;
74  self->func = kernelfunc;
75  self->prevbuddy = self;
76  self->nextbuddy = self;
77  self->cursize = sizeof (larank_kcache_t);
78  self->maxsize = 256 * 1024 * 1024;
79  self->qprev = SG_MALLOC(int32_t, 1);
80  self->qnext = SG_MALLOC(int32_t, 1);
81  self->rnext = self->qnext + 1;
82  self->rprev = self->qprev + 1;
83  self->rprev[-1] = -1;
84  self->rnext[-1] = -1;
85  return self;
86  }
87 
	/* Shrink cached row k to nlen entries (no-op unless nlen < current size).
	 * Shrinking to zero also unlinks the row from the LRU ring and makes it
	 * self-linked. cursize is adjusted by the number of freed floats. */
	static void xtruncate (larank_kcache_t * self, int32_t k, int32_t nlen)
	{
		int32_t olen = self->rsize[k];
		if (nlen < olen)
		{
			float32_t *ndata;
			float32_t *odata = self->rdata[k];
			if (nlen > 0)
			{
				/* keep only the first nlen cached kernel values */
				ndata = SG_MALLOC(float32_t, nlen);
				memcpy (ndata, odata, nlen * sizeof (float32_t));
			}
			else
			{
				/* drop the row entirely: remove it from the doubly linked
				   LRU ring and leave it pointing at itself */
				ndata = 0;
				self->rnext[self->rprev[k]] = self->rnext[k];
				self->rprev[self->rnext[k]] = self->rprev[k];
				self->rnext[k] = self->rprev[k] = k;
			}
			SG_FREE (odata);
			self->rdata[k] = ndata;
			self->rsize[k] = nlen;
			/* account for the freed memory (nlen - olen is negative here) */
			self->cursize += (int64_t) (nlen - olen) * sizeof (float32_t);
		}
	}
113 
114  static void xpurge (larank_kcache_t * self)
115  {
116  if (self->cursize > self->maxsize)
117  {
118  int32_t k = self->rprev[-1];
119  while (self->cursize > self->maxsize && k != self->rnext[-1])
120  {
121  int32_t pk = self->rprev[k];
122  xtruncate (self, k, 0);
123  k = pk;
124  }
125  }
126  }
127 
	/* Set the cache memory budget in bytes and immediately purge any excess. */
	static void larank_kcache_set_maximum_size (larank_kcache_t * self, int64_t entries)
	{
		ASSERT (self)
		ASSERT (entries > 0)
		self->maxsize = entries;
		xpurge (self);
	}
135 
	/* Destroy a cache: unlink it from its buddy ring, free every owned
	 * allocation, then poison and free the struct itself. Safe on NULL. */
	static void larank_kcache_destroy (larank_kcache_t * self)
	{
		if (self)
		{
			int32_t i;
			/* splice this cache out of the circular buddy list first so
			   surviving buddies keep a consistent ring */
			larank_kcache_t *nb = self->nextbuddy;
			larank_kcache_t *pb = self->prevbuddy;
			pb->nextbuddy = nb;
			nb->prevbuddy = pb;
			/* delete */
			if (self->i2r)
				SG_FREE (self->i2r);
			if (self->r2i)
				SG_FREE (self->r2i);
			/* free each cached row before the row-pointer table */
			if (self->rdata)
				for (i = 0; i < self->l; i++)
					if (self->rdata[i])
						SG_FREE (self->rdata[i]);
			if (self->rdata)
				SG_FREE (self->rdata);
			if (self->rsize)
				SG_FREE (self->rsize);
			if (self->rdiag)
				SG_FREE (self->rdiag);
			if (self->qnext)
				SG_FREE (self->qnext);
			if (self->qprev)
				SG_FREE (self->qprev);
			/* zero the struct to make use-after-free failures loud */
			memset (self, 0, sizeof (larank_kcache_t));
			SG_FREE (self);
		}
	}
168 
	/* Ensure the cache tables can hold at least n examples, growing by
	 * doubling (minimum 256). New slots start unused: rsize == -1,
	 * self-linked in the LRU ring, identity row<->example mapping. */
	static void xminsize (larank_kcache_t * self, int32_t n)
	{
		int32_t ol = self->l;
		if (n > ol)
		{
			int32_t i;
			/* grow geometrically to amortize reallocation cost */
			int32_t nl = CMath::max (256, ol);
			while (nl < n)
				nl = nl + nl;
			self->i2r = SG_REALLOC (int32_t, self->i2r, self->l, nl);
			self->r2i = SG_REALLOC (int32_t, self->r2i, self->l, nl);
			self->rsize = SG_REALLOC (int32_t, self->rsize, self->l, nl);
			/* qnext/qprev carry one extra slot for the -1 ring head */
			self->qnext = SG_REALLOC (int32_t, self->qnext, 1+self->l, (1 + nl));
			self->qprev = SG_REALLOC (int32_t, self->qprev, 1+self->l, (1 + nl));
			self->rdiag = SG_REALLOC (float32_t, self->rdiag, self->l, nl);
			self->rdata = SG_REALLOC (float32_t*, self->rdata, self->l, nl);
			/* re-derive the shifted views after realloc moved the buffers */
			self->rnext = self->qnext + 1;
			self->rprev = self->qprev + 1;
			for (i = ol; i < nl; i++)
			{
				self->i2r[i] = i;
				self->r2i[i] = i;
				self->rsize[i] = -1;   /* -1 marks "diagonal not yet computed" */
				self->rnext[i] = i;
				self->rprev[i] = i;
				self->rdata[i] = 0;
			}
			self->l = nl;
		}
	}
199 
	/* Return the row-index -> example-index table, grown to cover at least
	 * n rows. The returned pointer is owned by the cache; it is invalidated
	 * by the next call that grows the cache. */
	static int32_t* larank_kcache_r2i (larank_kcache_t * self, int32_t n)
	{
		xminsize (self, n);
		return self->r2i;
	}
205 
206  static void xextend (larank_kcache_t * self, int32_t k, int32_t nlen)
207  {
208  int32_t olen = self->rsize[k];
209  if (nlen > olen)
210  {
211  float32_t *ndata = SG_MALLOC(float32_t, nlen);
212  if (olen > 0)
213  {
214  float32_t *odata = self->rdata[k];
215  memcpy (ndata, odata, olen * sizeof (float32_t));
216  SG_FREE (odata);
217  }
218  self->rdata[k] = ndata;
219  self->rsize[k] = nlen;
220  self->cursize += (int64_t) (nlen - olen) * sizeof (float32_t);
221  self->maxrowlen = CMath::max (self->maxrowlen, nlen);
222  }
223  }
224 
	/* Swap two examples inside the cache: i1/i2 are example indices, r1/r2
	 * their current row positions. Every cached row that stores values at
	 * positions r1 or r2 must have those entries exchanged (or repaired from
	 * the diagonal / the other example's row, or truncated when the value
	 * cannot be recovered). Finally the r2i/i2r maps are updated. */
	static void xswap (larank_kcache_t * self, int32_t i1, int32_t i2, int32_t r1, int32_t r2)
	{
		/* swap row data */
		if (r1 < self->maxrowlen || r2 < self->maxrowlen)
		{
			int32_t mrl = 0;
			/* walk every active row in the LRU ring */
			int32_t k = self->rnext[-1];
			while (k >= 0)
			{
				int32_t nk = self->rnext[k];
				int32_t n = self->rsize[k];
				int32_t rr = self->i2r[k];
				float32_t *d = self->rdata[k];
				if (r1 < n)
				{
					if (r2 < n)
					{
						/* both positions cached in this row: plain swap */
						float32_t t1 = d[r1];
						float32_t t2 = d[r2];
						d[r1] = t2;
						d[r2] = t1;
					}
					else if (rr == r2)
					{
						/* row k is example i2 itself: K(i2,i2) is the diagonal */
						d[r1] = self->rdiag[k];
					}
					else
					{
						/* try to recover K(k,i2) from i2's own row by symmetry,
						   otherwise give up the tail of this row */
						int32_t arsize = self->rsize[i2];
						if (rr < arsize && rr != r1)
							d[r1] = self->rdata[i2][rr];
						else
							xtruncate (self, k, r1);
					}
				}
				else if (r2 < n)
				{
					if (rr == r1)
					{
						/* row k is example i1 itself: use its diagonal entry */
						d[r2] = self->rdiag[k];
					}
					else
					{
						/* symmetric recovery from i1's row, else truncate */
						int32_t arsize = self->rsize[i1];
						if (rr < arsize && rr != r2)
							d[r2] = self->rdata[i1][rr];
						else
							xtruncate (self, k, r2);
					}
				}
				mrl = CMath::max (mrl, self->rsize[k]);
				k = nk;
			}
			/* truncations above may have shortened the longest row */
			self->maxrowlen = mrl;
		}
		/* swap r2i and i2r */
		self->r2i[r1] = i2;
		self->r2i[r2] = i1;
		self->i2r[i1] = r2;
		self->i2r[i2] = r1;
	}
286 
	/* Swap two rows addressed by their row (cache-order) indices. */
	static void larank_kcache_swap_rr (larank_kcache_t * self, int32_t r1, int32_t r2)
	{
		xminsize (self, 1 + CMath::max(r1, r2));
		xswap (self, self->r2i[r1], self->r2i[r2], r1, r2);
	}
292 
	/* Swap row r1 with the row currently holding example i2
	 * (mixed row-index / example-index addressing). */
	static void larank_kcache_swap_ri (larank_kcache_t * self, int32_t r1, int32_t i2)
	{
		xminsize (self, 1 + CMath::max (r1, i2));
		xswap (self, self->r2i[r1], i2, r1, self->i2r[i2]);
	}
298 
	/* Return K(i,j), checking every buddy cache (including self) for a
	 * stored value before falling back to an actual kernel evaluation. */
	static float64_t xquery (larank_kcache_t * self, int32_t i, int32_t j)
	{
		/* search buddies */
		larank_kcache_t *cache = self->nextbuddy;
		do
		{
			int32_t l = cache->l;
			if (i < l && j < l)
			{
				/* is K(i,j) stored in row i at j's row position? */
				int32_t s = cache->rsize[i];
				int32_t p = cache->i2r[j];
				if (p < s)
					return cache->rdata[i][p];
				/* diagonal entries are kept separately once rsize >= 0 */
				if (i == j && s >= 0)
					return cache->rdiag[i];
				/* by symmetry, try row j at i's row position */
				p = cache->i2r[i];
				s = cache->rsize[j];
				if (p < s)
					return cache->rdata[j][p];
			}
			cache = cache->nextbuddy;
		}
		while (cache != self);
		/* compute */
		return self->func->kernel(i, j);
	}
325 
326 
	/* Public entry point for a single kernel value: validates arguments,
	 * then delegates to the buddy-aware lookup. */
	static float64_t larank_kcache_query (larank_kcache_t * self, int32_t i, int32_t j)
	{
		ASSERT (self)
		ASSERT (i >= 0)
		ASSERT (j >= 0)
		return xquery (self, i, j);
	}
334 
335 
	/* Merge the circular buddy lists of self and buddy so their caches can
	 * answer each other's queries. Both caches must wrap the same kernel.
	 * No-op if the two are already in the same ring. */
	static void larank_kcache_set_buddy (larank_kcache_t * self, larank_kcache_t * buddy)
	{
		larank_kcache_t *p = self;
		larank_kcache_t *selflast = self->prevbuddy;
		larank_kcache_t *buddylast = buddy->prevbuddy;
		/* check functions are identical */
		ASSERT (self->func == buddy->func)
		/* make sure we are not already buddies */
		do
		{
			if (p == buddy)
				return;
			p = p->nextbuddy;
		}
		while (p != self);
		/* link: splice the two rings into one */
		selflast->nextbuddy = buddy;
		buddy->prevbuddy = selflast;
		buddylast->nextbuddy = self;
		self->prevbuddy = buddylast;
	}
357 
358 
	/* Return a pointer to row i holding at least len cached kernel values,
	 * computing missing entries on demand, then move the row to the front
	 * of the LRU ring. The pointer is owned by the cache and invalidated
	 * by later cache operations. */
	static float32_t* larank_kcache_query_row (larank_kcache_t * self, int32_t i, int32_t len)
	{
		ASSERT (i >= 0)
		if (i < self->l && len <= self->rsize[i])
		{
			/* fast path: row already long enough — just unlink it so it can
			   be re-inserted at the ring head below */
			self->rnext[self->rprev[i]] = self->rnext[i];
			self->rprev[self->rnext[i]] = self->rprev[i];
		}
		else
		{
			int32_t olen, p;
			float32_t *d;
			if (i >= self->l || len >= self->l)
				xminsize (self, CMath::max (1 + i, len));
			olen = self->rsize[i];
			if (olen < len)
			{
				if (olen < 0)
				{
					/* first touch of this row: compute and store K(i,i) */
					self->rdiag[i] = self->func->kernel(i, i);
					olen = self->rsize[i] = 0;
				}
				xextend (self, i, len);
				d = self->rdata[i];
				/* temporarily report the old size so the recursive queries
				   below do not read the still-uncomputed tail of this row */
				self->rsize[i] = olen;
				for (p = olen; p < len; p++)
					d[p] = larank_kcache_query (self, self->r2i[p], i);
				self->rsize[i] = len;
			}
			self->rnext[self->rprev[i]] = self->rnext[i];
			self->rprev[self->rnext[i]] = self->rprev[i];
			xpurge (self);
		}
		/* re-insert row i at the head of the LRU ring (most recently used) */
		self->rprev[i] = -1;
		self->rnext[i] = self->rnext[-1];
		self->rnext[self->rprev[i]] = i;
		self->rprev[self->rnext[i]] = i;
		return self->rdata[i];
	}
398 
399 }
400 
401 
402 // Initializing an output class (basically creating a kernel cache for it)
403 void LaRankOutput::initialize (CKernel* kfunc, int64_t cache)
404 {
405  kernel = larank_kcache_create (kfunc);
406  larank_kcache_set_maximum_size (kernel, cache * 1024 * 1024);
407  beta = SG_MALLOC(float32_t, 1);
408  g = SG_MALLOC(float32_t, 1);
409  *beta=0;
410  *g=0;
411  l = 0;
412 }
413 
414 // Destroying an output class (basically destroying the kernel cache)
415 void LaRankOutput::destroy ()
416 {
417  larank_kcache_destroy (kernel);
418  kernel=NULL;
419  SG_FREE(beta);
420  SG_FREE(g);
421  beta=NULL;
422  g=NULL;
423 }
424 
425 // !Important! Computing the score of a given input vector for the actual output
426 float64_t LaRankOutput::computeScore (int32_t x_id)
427 {
428  if (l == 0)
429  return 0;
430  else
431  {
432  float32_t *row = larank_kcache_query_row (kernel, x_id, l);
433  return CMath::dot (beta, row, l);
434  }
435 }
436 
437 // !Important! Computing the gradient of a given input vector for the actual output
438 float64_t LaRankOutput::computeGradient (int32_t xi_id, int32_t yi, int32_t ythis)
439 {
440  return (yi == ythis ? 1 : 0) - computeScore (xi_id);
441 }
442 
443 // Updating the solution in the actual output
444 void LaRankOutput::update (int32_t x_id, float64_t lambda, float64_t gp)
445 {
446  int32_t *r2i = larank_kcache_r2i (kernel, l);
447  int64_t xr = l + 1;
448  for (int32_t r = 0; r < l; r++)
449  if (r2i[r] == x_id)
450  {
451  xr = r;
452  break;
453  }
454 
455  // updates the cache order and the beta coefficient
456  if (xr < l)
457  {
458  beta[xr]+=lambda;
459  }
460  else
461  {
462  larank_kcache_swap_ri (kernel, l, x_id);
463  g = SG_REALLOC(float32_t, g, l, l+1);
464  beta = SG_REALLOC(float32_t, beta, l, l+1);
465  g[l]=gp;
466  beta[l]=lambda;
467  l++;
468  }
469 
470  // update stored gradients
471  float32_t *row = larank_kcache_query_row (kernel, x_id, l);
472  for (int32_t r = 0; r < l; r++)
473  {
474  float64_t oldg = g[r];
475  g[r]=oldg - lambda * row[r];
476  }
477 }
478 
// Linking the cache of this output to the cache of another "buddy" output
// so that if a requested value is not found in this cache, you can ask your
// buddy if it has it.
void LaRankOutput::set_kernel_buddy (larank_kcache_t * bud)
{
	larank_kcache_set_buddy (bud, kernel);
}
485 
486 // Removing useless support vectors (for which beta=0)
487 int32_t LaRankOutput::cleanup ()
488 {
489  int32_t count = 0;
490  std::vector < int32_t >idx;
491  for (int32_t x = 0; x < l; x++)
492  {
493  if ((beta[x] < FLT_EPSILON) && (beta[x] > -FLT_EPSILON))
494  {
495  idx.push_back (x);
496  count++;
497  }
498  }
499  int32_t new_l = l - count;
500  for (int32_t xx = 0; xx < count; xx++)
501  {
502  int32_t i = idx[xx] - xx;
503  for (int32_t r = i; r < (l - 1); r++)
504  {
505  larank_kcache_swap_rr (kernel, r, int64_t(r) + 1);
506  beta[r]=beta[r + 1];
507  g[r]=g[r + 1];
508  }
509  }
510  beta = SG_REALLOC(float32_t, beta, l, new_l+1);
511  g = SG_REALLOC(float32_t, g, l, new_l+1);
512  beta[new_l]=0;
513  g[new_l]=0;
514  l = new_l;
515  return count;
516 }
517 
518 // --- Below are information or "get" functions --- //
519 //
520 float64_t LaRankOutput::getW2 ()
521 {
522  float64_t sum = 0;
523  int32_t *r2i = larank_kcache_r2i (kernel, l + 1);
524  for (int32_t r = 0; r < l; r++)
525  {
526  float32_t *row_r = larank_kcache_query_row (kernel, r2i[r], l);
527  sum += beta[r] * CMath::dot (beta, row_r, l);
528  }
529  return sum;
530 }
531 
// Kernel self-similarity K(x_id, x_id), served from the cache when possible.
float64_t LaRankOutput::getKii (int32_t x_id)
{
	return larank_kcache_query (kernel, x_id, x_id);
}
536 
537 //
538 float64_t LaRankOutput::getBeta (int32_t x_id)
539 {
540  int32_t *r2i = larank_kcache_r2i (kernel, l);
541  int32_t xr = -1;
542  for (int32_t r = 0; r < l; r++)
543  if (r2i[r] == x_id)
544  {
545  xr = r;
546  break;
547  }
548  return (xr < 0 ? 0 : beta[xr]);
549 }
550 
551 //
552 float64_t LaRankOutput::getGradient (int32_t x_id)
553 {
554  int32_t *r2i = larank_kcache_r2i (kernel, l);
555  int32_t xr = -1;
556  for (int32_t r = 0; r < l; r++)
557  if (r2i[r] == x_id)
558  {
559  xr = r;
560  break;
561  }
562  return (xr < 0 ? 0 : g[xr]);
563 }
564 bool LaRankOutput::isSupportVector (int32_t x_id) const
565 {
566  int32_t *r2i = larank_kcache_r2i (kernel, l);
567  int32_t xr = -1;
568  for (int32_t r = 0; r < l; r++)
569  if (r2i[r] == x_id)
570  {
571  xr = r;
572  break;
573  }
574  return (xr >= 0);
575 }
576 
577 //
// Export the support vector example indices into a freshly allocated array
// (stored as float32_t per the interface); returns their number.
// The caller takes ownership of sv and must SG_FREE it.
int32_t LaRankOutput::getSV (float32_t* &sv) const
{
	sv=SG_MALLOC(float32_t, l);
	int32_t *r2i = larank_kcache_r2i (kernel, l);
	for (int32_t r = 0; r < l; r++)
		sv[r]=r2i[r];
	return l;
}
586 
588  nb_seen_examples (0), nb_removed (0),
589  n_pro (0), n_rep (0), n_opt (0),
590  w_pro (1), w_rep (1), w_opt (1), y0 (0), m_dual (0),
591  batch_mode(true), step(0)
592 {
593 }
594 
597  nb_seen_examples (0), nb_removed (0),
598  n_pro (0), n_rep (0), n_opt (0),
599  w_pro (1), w_rep (1), w_opt (1), y0 (0), m_dual (0),
600  batch_mode(true), step(0)
601 {
602 }
603 
605 {
606  destroy();
607 }
608 
610 {
611  tau = 0.0001;
612 
616 
618 
619  if (data)
620  {
621  if (data->get_num_vectors() != m_labels->get_num_labels())
622  {
623  SG_ERROR("Numbert of vectors (%d) does not match number of labels (%d)\n",
625  }
626  m_kernel->init(data, data);
627  }
628 
630 
633 
634  int32_t n_it = 1;
635  float64_t gap = DBL_MAX;
636 
637  SG_INFO("Training on %d examples\n", nb_train)
638  while (gap > get_C() && (!CSignal::cancel_computations())) // stopping criteria
639  {
640  float64_t tr_err = 0;
641  int32_t ind = step;
642  for (int32_t i = 0; i < nb_train; i++)
643  {
644  int32_t y=((CMulticlassLabels*) m_labels)->get_label(i);
645  if (add (i, y) != y) // call the add function
646  tr_err++;
647 
648  if (ind && i / ind)
649  {
650  SG_DEBUG("Done: %d %% Train error (online): %f%%\n",
651  (int32_t) (((float64_t) i) / nb_train * 100), (tr_err / ((float64_t) i + 1)) * 100);
652  ind += step;
653  }
654  }
655 
656  SG_DEBUG("End of iteration %d\n", n_it++)
657  SG_DEBUG("Train error (online): %f%%\n", (tr_err / nb_train) * 100)
658  gap = computeGap ();
659  SG_ABS_PROGRESS(gap, -CMath::log10(gap), -CMath::log10(DBL_MAX), -CMath::log10(get_C()), 6)
660 
661  if (!batch_mode) // skip stopping criteria if online mode
662  gap = 0;
663  }
664  SG_DONE()
665 
666  int32_t num_classes = outputs.size();
667  create_multiclass_svm(num_classes);
668  SG_DEBUG("%d classes\n", num_classes)
669 
670  // Used for saving a model file
671  int32_t i=0;
672  for (outputhash_t::const_iterator it = outputs.begin (); it != outputs.end (); ++it)
673  {
674  const LaRankOutput* o=&(it->second);
675 
676  larank_kcache_t* k=o->getKernel();
677  int32_t l=o->get_l();
678  float32_t* beta=o->getBetas();
679  int32_t *r2i = larank_kcache_r2i (k, l);
680 
681  ASSERT(l>0)
682  SG_DEBUG("svm[%d] has %d sv, b=%f\n", i, l, 0.0)
683 
684  CSVM* svm=new CSVM(l);
685 
686  for (int32_t j=0; j<l; j++)
687  {
688  svm->set_alpha(j, beta[j]);
689  svm->set_support_vector(j, r2i[j]);
690  }
691 
692  svm->set_bias(0);
693  set_svm(i, svm);
694  i++;
695  }
696  destroy();
697 
698  return true;
699 }
700 
// LEARNING FUNCTION: add new patterns and run optimization steps selected with adaptative schedule
// Returns the predicted class for x_id (from the processNew step).
int32_t CLaRank::add (int32_t x_id, int32_t yi)
{
	++nb_seen_examples;
	// create a new output object if this one has never been seen before
	if (!getOutput (yi))
	{
		outputs.insert (std::make_pair (yi, LaRankOutput ()));
		LaRankOutput *cur = getOutput (yi);
		cur->initialize (m_kernel, cache);
		// remember the very first class: its cache anchors the buddy ring
		if (outputs.size () == 1)
			y0 = outputs.begin ()->first;
		// link the cache of this new output to a buddy
		if (outputs.size () > 1)
		{
			LaRankOutput *out0 = getOutput (y0);
			cur->set_kernel_buddy (out0->getKernel ());
		}
	}

	// reuse the stored pattern if x_id was seen before, else a temporary
	LaRankPattern tpattern (x_id, yi);
	LaRankPattern & pattern = (patterns.isPattern (x_id)) ? patterns.getPattern (x_id) : tpattern;

	// ProcessNew with the "fresh" pattern; time it to update the
	// gain-per-second estimate w_pro used by the adaptive schedule
	float64_t time1 = CTime::get_curtime();
	process_return_t pro_ret = process (pattern, processNew);
	float64_t dual_increase = pro_ret.dual_increase;
	float64_t duration = (CTime::get_curtime() - time1);
	float64_t coeff = dual_increase / (0.00001 + duration);
	m_dual += dual_increase;
	n_pro++;
	w_pro = 0.05 * coeff + (1 - 0.05) * w_pro;

	// ProcessOld & Optimize until ready for a new processnew
	// (Adaptative schedule here)
	for (;;)
	{
		// clamp each weight to at least 5% of the total so no step type starves
		float64_t w_sum = w_pro + w_rep + w_opt;
		float64_t prop_min = w_sum / 20;
		if (w_pro < prop_min)
			w_pro = prop_min;
		if (w_rep < prop_min)
			w_rep = prop_min;
		if (w_opt < prop_min)
			w_opt = prop_min;
		w_sum = w_pro + w_rep + w_opt;
		// sample the next step type proportionally to its recent payoff
		float64_t r = CMath::random(0.0, w_sum);
		if (r <= w_pro)
		{
			break;
		}
		else if ((r > w_pro) && (r <= w_pro + w_rep)) // ProcessOld here
		{
			float64_t ltime1 = CTime::get_curtime ();
			float64_t ldual_increase = reprocess ();
			float64_t lduration = (CTime::get_curtime () - ltime1);
			float64_t lcoeff = ldual_increase / (0.00001 + lduration);
			m_dual += ldual_increase;
			n_rep++;
			w_rep = 0.05 * lcoeff + (1 - 0.05) * w_rep;
		}
		else // Optimize here
		{
			float64_t ltime1 = CTime::get_curtime ();
			float64_t ldual_increase = optimize ();
			float64_t lduration = (CTime::get_curtime () - ltime1);
			float64_t lcoeff = ldual_increase / (0.00001 + lduration);
			m_dual += ldual_increase;
			n_opt++;
			w_opt = 0.05 * lcoeff + (1 - 0.05) * w_opt;
		}
	}
	if (nb_seen_examples % 100 == 0) // Cleanup useless Support Vectors/Patterns sometimes
		nb_removed += cleanup ();
	return pro_ret.ypred;
}
777 
778 // PREDICTION FUNCTION: main function in la_rank_classify
779 int32_t CLaRank::predict (int32_t x_id)
780 {
781  int32_t res = -1;
782  float64_t score_max = -DBL_MAX;
783  for (outputhash_t::iterator it = outputs.begin (); it != outputs.end ();++it)
784  {
785  float64_t score = it->second.computeScore (x_id);
786  if (score > score_max)
787  {
788  score_max = score;
789  res = it->first;
790  }
791  }
792  return res;
793 }
794 
796 {
797  for (outputhash_t::iterator it = outputs.begin (); it != outputs.end ();++it)
798  it->second.destroy ();
799  outputs.clear();
800 }
801 
802 
803 // Compute Duality gap (costly but used in stopping criteria in batch mode)
805 {
806  float64_t sum_sl = 0;
807  float64_t sum_bi = 0;
808  for (uint32_t i = 0; i < patterns.maxcount (); ++i)
809  {
810  const LaRankPattern & p = patterns[i];
811  if (!p.exists ())
812  continue;
813  LaRankOutput *out = getOutput (p.y);
814  if (!out)
815  continue;
816  sum_bi += out->getBeta (p.x_id);
817  float64_t gi = out->computeGradient (p.x_id, p.y, p.y);
818  float64_t gmin = DBL_MAX;
819  for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
820  {
821  if (it->first != p.y && it->second.isSupportVector (p.x_id))
822  {
823  float64_t g =
824  it->second.computeGradient (p.x_id, p.y, it->first);
825  if (g < gmin)
826  gmin = g;
827  }
828  }
829  sum_sl += CMath::max (0.0, gi - gmin);
830  }
831  return CMath::max (0.0, computeW2 () + get_C() * sum_sl - sum_bi);
832 }
833 
// Number of classes seen so far (one output object per class).
uint32_t CLaRank::getNumOutputs () const
{
	return outputs.size ();
}
839 
840 // Number of Support Vectors
841 int32_t CLaRank::getNSV ()
842 {
843  int32_t res = 0;
844  for (outputhash_t::const_iterator it = outputs.begin (); it != outputs.end (); ++it)
845  {
846  float32_t* sv=NULL;
847  res += it->second.getSV (sv);
848  SG_FREE(sv);
849  }
850  return res;
851 }
852 
853 // Norm of the parameters vector
855 {
856  float64_t res = 0;
857  for (uint32_t i = 0; i < patterns.maxcount (); ++i)
858  {
859  const LaRankPattern & p = patterns[i];
860  if (!p.exists ())
861  continue;
862  for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
863  if (it->second.getBeta (p.x_id))
864  res += it->second.getBeta (p.x_id) * it->second.computeScore (p.x_id);
865  }
866  return res;
867 }
868 
869 // Compute Dual objective value
871 {
872  float64_t res = 0;
873  for (uint32_t i = 0; i < patterns.maxcount (); ++i)
874  {
875  const LaRankPattern & p = patterns[i];
876  if (!p.exists ())
877  continue;
878  LaRankOutput *out = getOutput (p.y);
879  if (!out)
880  continue;
881  res += out->getBeta (p.x_id);
882  }
883  return res - computeW2 () / 2;
884 }
885 
886 LaRankOutput *CLaRank::getOutput (int32_t index)
887 {
888  outputhash_t::iterator it = outputs.find (index);
889  return it == outputs.end ()? NULL : &it->second;
890 }
891 
// IMPORTANT Main SMO optimization step: pick the violating pair (yp, ym)
// of outputs for this pattern, compute the clipped step lambda and apply
// a +lambda/-lambda update. Returns the dual increase and the predicted
// class (0s when no feasible pair is found).
CLaRank::process_return_t CLaRank::process (const LaRankPattern & pattern, process_type ptype)
{
	process_return_t pro_ret = process_return_t (0, 0);

	/*
	 ** compute gradient and sort
	 */
	std::vector < outputgradient_t > outputgradients;

	outputgradients.reserve (getNumOutputs ());

	std::vector < outputgradient_t > outputscores;
	outputscores.reserve (getNumOutputs ());

	// for processOptimize only existing support vectors are considered
	for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
	{
		if (ptype != processOptimize
				|| it->second.isSupportVector (pattern.x_id))
		{
			float64_t g =
				it->second.computeGradient (pattern.x_id, pattern.y, it->first);
			outputgradients.push_back (outputgradient_t (it->first, g));
			// score = 1 - g for the true class, -g otherwise
			if (it->first == pattern.y)
				outputscores.push_back (outputgradient_t (it->first, (1 - g)));
			else
				outputscores.push_back (outputgradient_t (it->first, -g));
		}
	}

	std::sort (outputgradients.begin (), outputgradients.end ());

	/*
	 ** determine the prediction
	 */
	std::sort (outputscores.begin (), outputscores.end ());
	pro_ret.ypred = outputscores[0].output;

	/*
	 ** Find yp (1st part of the pair): largest gradient whose beta can
	 ** still be increased (beta < C for the true class, beta < 0 bound
	 ** otherwise; non-SVs qualify only for the true class)
	 */
	outputgradient_t ygp;
	LaRankOutput *outp = NULL;
	uint32_t p;
	for (p = 0; p < outputgradients.size (); ++p)
	{
		outputgradient_t & current = outputgradients[p];
		LaRankOutput *output = getOutput (current.output);
		bool support = ptype == processOptimize || output->isSupportVector (pattern.x_id);
		bool goodclass = current.output == pattern.y;
		if ((!support && goodclass) ||
				(support && output->getBeta (pattern.x_id) < (goodclass ? get_C() : 0)))
		{
			ygp = current;
			outp = output;
			break;
		}
	}
	if (p == outputgradients.size ())
		return pro_ret;

	/*
	 ** Find ym (2nd part of the pair): smallest gradient whose beta can
	 ** still be decreased
	 */
	outputgradient_t ygm;
	LaRankOutput *outm = NULL;
	int32_t m;
	for (m = outputgradients.size () - 1; m >= 0; --m)
	{
		outputgradient_t & current = outputgradients[m];
		LaRankOutput *output = getOutput (current.output);
		bool support = ptype == processOptimize || output->isSupportVector (pattern.x_id);
		bool goodclass = current.output == pattern.y;
		if (!goodclass || (support && output->getBeta (pattern.x_id) > 0))
		{
			ygm = current;
			outm = output;
			break;
		}
	}
	if (m < 0)
		return pro_ret;

	/*
	 ** Throw or Insert pattern: skip if the KKT violation is below tau
	 */
	if ((ygp.gradient - ygm.gradient) < tau)
		return pro_ret;
	if (ptype == processNew)
		patterns.insert (pattern);

	/*
	 ** compute lambda and clip it to the box constraints
	 */
	float64_t kii = outp->getKii (pattern.x_id);
	float64_t lambda = (ygp.gradient - ygm.gradient) / (2 * kii);
	if (ptype == processOptimize || outp->isSupportVector (pattern.x_id))
	{
		float64_t beta = outp->getBeta (pattern.x_id);
		if (ygp.output == pattern.y)
			lambda = CMath::min (lambda, get_C() - beta);
		else
			lambda = CMath::min (lambda, fabs (beta));
	}
	else
		lambda = CMath::min (lambda, get_C());

	/*
	 ** update the solution: symmetric +/- step on the pair
	 */
	outp->update (pattern.x_id, lambda, ygp.gradient);
	outm->update (pattern.x_id, -lambda, ygm.gradient);

	pro_ret.dual_increase = lambda * ((ygp.gradient - ygm.gradient) - lambda * kii);
	return pro_ret;
}
1008 
1009 // ProcessOld
1010 float64_t CLaRank::reprocess ()
1011 {
1012  if (patterns.size ())
1013  {
1014  for (int32_t n = 0; n < 10; ++n)
1015  {
1016  process_return_t pro_ret = process (patterns.sample (), processOld);
1017  if (pro_ret.dual_increase)
1018  return pro_ret.dual_increase;
1019  }
1020  }
1021  return 0;
1022 }
1023 
1024 // Optimize
1025 float64_t CLaRank::optimize ()
1026 {
1027  float64_t dual_increase = 0;
1028  if (patterns.size ())
1029  {
1030  for (int32_t n = 0; n < 10; ++n)
1031  {
1032  process_return_t pro_ret =
1033  process (patterns.sample(), processOptimize);
1034  dual_increase += pro_ret.dual_increase;
1035  }
1036  }
1037  return dual_increase;
1038 }
1039 
// remove patterns and return the number of patterns that were removed
// NOTE(review): the actual cleanup logic below is commented out, so this
// currently removes nothing and always returns 0 — confirm whether the
// disabling was intentional before re-enabling it.
uint32_t CLaRank::cleanup ()
{
	/*
	   for (outputhash_t::iterator it = outputs.begin (); it != outputs.end (); ++it)
	   it->second.cleanup ();

	   uint32_t res = 0;
	   for (uint32_t i = 0; i < patterns.size (); ++i)
	   {
	   LaRankPattern & p = patterns[i];
	   if (p.exists () && !outputs[p.y].isSupportVector (p.x_id))
	   {
	   patterns.remove (i);
	   ++res;
	   }
	   }
	   return res;
	 */
	return 0;
}
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:98
static void xtruncate(larank_kcache_t *self, int32_t k, int32_t nlen)
Definition: LaRank.cpp:88
static float32_t * larank_kcache_query_row(larank_kcache_t *self, int32_t i, int32_t len)
Definition: LaRank.cpp:359
#define SG_INFO(...)
Definition: SGIO.h:118
#define SG_DONE()
Definition: SGIO.h:157
bool batch_mode
whether to use online learning or batch training
Definition: LaRank.h:484
virtual void destroy()
Definition: LaRank.cpp:795
int32_t getNSV()
Definition: LaRank.cpp:841
virtual ELabelType get_label_type() const =0
static void larank_kcache_set_maximum_size(larank_kcache_t *self, int64_t entries)
Definition: LaRank.cpp:128
The class Labels models labels, i.e. class assignments of objects.
Definition: Labels.h:43
int64_t cache
cache
Definition: LaRank.h:482
bool train_machine(CFeatures *data)
Definition: LaRank.cpp:609
virtual int32_t get_num_labels() const =0
static void xswap(larank_kcache_t *self, int32_t i1, int32_t i2, int32_t r1, int32_t r2)
Definition: LaRank.cpp:225
float64_t tau
tau
Definition: LaRank.h:477
multi-class labels 0,1,...
Definition: LabelTypes.h:20
static float64_t log10(float64_t v)
Definition: Math.h:897
void(* update)(float *foo, float bar)
Definition: JLCoverTree.h:528
virtual int32_t get_num_vectors() const =0
CLabels * m_labels
Definition: Machine.h:361
#define SG_ERROR(...)
Definition: SGIO.h:129
static float64_t larank_kcache_query(larank_kcache_t *self, int32_t i, int32_t j)
Definition: LaRank.cpp:327
virtual int32_t add(int32_t x_id, int32_t yi)
Definition: LaRank.cpp:702
virtual int32_t get_num_vec_lhs()
Definition: Kernel.h:516
virtual float64_t computeGap()
Definition: LaRank.cpp:804
static uint64_t random()
Definition: Math.h:1019
static void larank_kcache_set_buddy(larank_kcache_t *self, larank_kcache_t *buddy)
Definition: LaRank.cpp:336
static float64_t get_curtime()
Definition: Time.h:121
float64_t computeW2()
Definition: LaRank.cpp:854
int32_t nb_train
nb train
Definition: LaRank.h:480
static void xextend(larank_kcache_t *self, int32_t k, int32_t nlen)
Definition: LaRank.cpp:206
Multiclass Labels for multi-class classification.
static float64_t xquery(larank_kcache_t *self, int32_t i, int32_t j)
Definition: LaRank.cpp:299
static void larank_kcache_swap_rr(larank_kcache_t *self, int32_t r1, int32_t r2)
Definition: LaRank.cpp:287
#define ASSERT(x)
Definition: SGIO.h:201
float64_t getDual()
Definition: LaRank.cpp:870
class MultiClassSVM
Definition: MulticlassSVM.h:28
void set_bias(float64_t bias)
static void clear_cancel()
Definition: Signal.cpp:129
virtual ~CLaRank()
Definition: LaRank.cpp:604
double float64_t
Definition: common.h:50
bool set_alpha(int32_t idx, float64_t val)
static T max(T a, T b)
Definition: Math.h:168
bool set_support_vector(int32_t idx, int32_t val)
static int32_t * larank_kcache_r2i(larank_kcache_t *self, int32_t n)
Definition: LaRank.cpp:200
static float64_t dot(const bool *v1, const bool *v2, int32_t n)
Compute dot product between v1 and v2 (blas optimized)
Definition: Math.h:627
virtual uint32_t getNumOutputs() const
Definition: LaRank.cpp:835
static bool cancel_computations()
Definition: Signal.h:86
virtual int32_t get_num_vec_rhs()
Definition: Kernel.h:525
#define SG_ABS_PROGRESS(...)
Definition: SGIO.h:152
static void larank_kcache_destroy(larank_kcache_t *self)
Definition: LaRank.cpp:136
float float32_t
Definition: common.h:49
static void xminsize(larank_kcache_t *self, int32_t n)
Definition: LaRank.cpp:169
#define SG_DEBUG(...)
Definition: SGIO.h:107
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
static void larank_kcache_swap_ri(larank_kcache_t *self, int32_t r1, int32_t i2)
Definition: LaRank.cpp:293
static larank_kcache_t * larank_kcache_create(CKernel *kernelfunc)
Definition: LaRank.cpp:68
The class Features is the base class of all feature objects.
Definition: Features.h:68
bool create_multiclass_svm(int32_t num_classes)
static T min(T a, T b)
Definition: Math.h:157
static void xpurge(larank_kcache_t *self)
Definition: LaRank.cpp:114
A generic Support Vector Machine Interface.
Definition: SVM.h:49
virtual int32_t predict(int32_t x_id)
Definition: LaRank.cpp:779
The Kernel base class.
Definition: Kernel.h:158
int32_t get_cache_size()
Definition: Kernel.h:598
multiclass one vs rest strategy used to train generic multiclass machines for K-class problems with b...
bool set_svm(int32_t num, CSVM *svm)
int32_t step
progess output
Definition: LaRank.h:487

SHOGUN 机器学习工具包 - 项目文档