SHOGUN  v2.0.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
Kernel.cpp
Go to the documentation of this file.
1 /*
2  * EXCEPT FOR THE KERNEL CACHING FUNCTIONS WHICH ARE (W) THORSTEN JOACHIMS
3  * COPYRIGHT (C) 1999 UNIVERSITAET DORTMUND - ALL RIGHTS RESERVED
4  *
5  * this program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 3 of the License, or
8  * (at your option) any later version.
9  *
10  * Written (W) 1999-2009 Soeren Sonnenburg
11  * Written (W) 1999-2008 Gunnar Raetsch
12  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
13  */
14 
15 #include <shogun/lib/config.h>
16 #include <shogun/lib/common.h>
17 #include <shogun/io/SGIO.h>
18 #include <shogun/io/File.h>
19 #include <shogun/lib/Time.h>
20 #include <shogun/lib/Signal.h>
21 
22 #include <shogun/base/Parallel.h>
23 
24 #include <shogun/kernel/Kernel.h>
27 #include <shogun/base/Parameter.h>
28 
30 
31 #include <string.h>
32 #include <unistd.h>
33 #include <math.h>
34 
35 #ifdef HAVE_PTHREAD
36 #include <pthread.h>
37 #endif
38 
39 using namespace shogun;
40 
// NOTE(review): the signature line (original line 41, presumably the
// CKernel default constructor `CKernel::CKernel() : CSGObject()`) was
// dropped by the doc extraction — confirm against repository source.
42 {
43  init();
// NOTE(review): original line 44 is missing here (likely a
// register_params()/normalizer call) — verify before relying on this body.
45 }
46 
// Construct a kernel with a cache of `size` MB; values below 10 MB are
// clamped up to 10.
47 CKernel::CKernel(int32_t size) : CSGObject()
48 {
49  init();
50 
51  if (size<10)
52  size=10;
53 
54  cache_size=size;
// NOTE(review): original line 55 is missing here (doc-extraction gap) —
// verify against repository source before assuming the body is complete.
56 }
57 
58 
// Construct a kernel with a cache of `size` MB (clamped to >=10) and
// immediately initialize it on the given left/right feature objects.
59 CKernel::CKernel(CFeatures* p_lhs, CFeatures* p_rhs, int32_t size) : CSGObject()
60 {
61  init();
62 
63  if (size<10)
64  size=10;
65 
66  cache_size=size;
67 
// NOTE(review): original line 68 is missing (doc-extraction gap).
69  init(p_lhs, p_rhs);
// NOTE(review): original line 70 is missing (likely register_params()).
71 }
72 
// NOTE(review): destructor signature (original line 73, `CKernel::~CKernel()`)
// was dropped by the doc extraction.
74 {
75  if (get_is_initialized())
76  SG_ERROR("Kernel still initialized on destruction.\n");
77 
// NOTE(review): original lines 78-79 are missing here (presumably cache and
// normalizer cleanup) — verify against repository source.
80 
81  SG_INFO("Kernel deleted (%p).\n", this);
82 }
83 
84 #ifdef USE_SVMLIGHT
// Resize the SVMlight kernel cache to `size` MB (clamped to >=10) and
// re-initialize it if features are attached.
85 void CKernel::resize_kernel_cache(KERNELCACHE_IDX size, bool regression_hack)
86 {
87  if (size<10)
88  size=10;
89 
// NOTE(review): original line 90 is missing here — presumably
// kernel_cache_cleanup() to free the old cache before re-init; verify.
91  cache_size=size;
92 
93  if (has_features() && get_num_vec_lhs())
94  kernel_cache_init(cache_size, regression_hack);
95 }
96 #endif //USE_SVMLIGHT
97 
// Attach left/right feature objects to this kernel. Reference counts are
// bumped first so that re-initializing with the same features cannot
// destroy them mid-way; the "safety" refs are released at the end.
98 bool CKernel::init(CFeatures* l, CFeatures* r)
99 {
100  /* make sure that features are not deleted if same ones are used */
101  SG_REF(l);
102  SG_REF(r);
103 
104  //make sure features were indeed supplied
105  ASSERT(l);
106  ASSERT(r);
107 
108  //make sure features are compatible
// NOTE(review): original lines 109-110 are missing — presumably ASSERTs
// comparing feature class/type of l and r; verify against source.
111 
112  //remove references to previous features
// NOTE(review): original line 113 is missing — presumably
// remove_lhs_and_rhs(); verify.
114 
115  //increase reference counts
116  SG_REF(l);
117  if (l==r)
118  lhs_equals_rhs=true;
119  else // l!=r
120  SG_REF(r);
121 
122  lhs=l;
123  rhs=r;
124 
// NOTE(review): original lines 125-126 and 128-129 are missing —
// presumably the assignments of num_lhs/num_rhs from the feature objects.
127 
130 
131  /* unref "safety" refs from beginning */
132  SG_UNREF(r);
133  SG_UNREF(l);
134 
135  return true;
136 }
137 
// NOTE(review): this region holds four normalizer accessors whose signature
// lines were dropped by the doc extraction:
//   line 138: bool CKernel::set_normalizer(CKernelNormalizer* n)
//   line 150: CKernelNormalizer* CKernel::get_normalizer()  (approx.)
//   line 156: bool CKernel::init_normalizer()               (approx.)
//   line 161: void CKernel::normalizer_cleanup()            (approx.)
// Verify names/signatures against repository source.
139 {
140  SG_REF(n);
141  if (lhs && rhs)
142  n->init(this);
143 
// NOTE(review): original line 144 missing — presumably SG_UNREF(normalizer).
145  normalizer=n;
146 
147  return (normalizer!=NULL);
148 }
149 
// Getter: returns the currently installed normalizer (line 152 missing,
// presumably SG_REF(normalizer)).
151 {
153  return normalizer;
154 }
155 
// Re-run normalizer initialization against this kernel.
157 {
158  return normalizer->init(this);
159 }
160 
// Cleanup body (original line 163, presumably SG_UNREF(normalizer), missing).
162 {
164 }
165 
166 #ifdef USE_SVMLIGHT
167 /****************************** Cache handling *******************************/
168 
// Allocate and initialize the SVMlight-style kernel cache for `buffsize` MB.
// The buffer is capped at totdoc^2 elements; in regression mode the row
// count is doubled because constraints are duplicated.
169 void CKernel::kernel_cache_init(int32_t buffsize, bool regression_hack)
170 {
171  int32_t totdoc=get_num_vec_lhs();
172  if (totdoc<=0)
173  {
174  SG_ERROR("kernel has zero rows: num_lhs=%d num_rhs=%d\n",
// NOTE(review): original line 175 (the SG_ERROR argument list, presumably
// get_num_vec_lhs(), get_num_vec_rhs()) is missing — verify.
176  }
177  uint64_t buffer_size=0;
178  int32_t i;
179 
180  //in regression the additional constraints are made by doubling the training data
181  if (regression_hack)
182  totdoc*=2;
183 
184  buffer_size=((uint64_t) buffsize)*1024*1024/sizeof(KERNELCACHE_ELEM);
185  if (buffer_size>((uint64_t) totdoc)*totdoc)
186  buffer_size=((uint64_t) totdoc)*totdoc;
187 
188  SG_INFO( "using a kernel cache of size %lld MB (%lld bytes) for %s Kernel\n", buffer_size*sizeof(KERNELCACHE_ELEM)/1024/1024, buffer_size*sizeof(KERNELCACHE_ELEM), get_name());
189 
190  //make sure it fits in the *signed* KERNELCACHE_IDX type
191  ASSERT(buffer_size < (((uint64_t) 1) << (sizeof(KERNELCACHE_IDX)*8-1)));
192 
193  kernel_cache.index = SG_MALLOC(int32_t, totdoc);
194  kernel_cache.occu = SG_MALLOC(int32_t, totdoc);
195  kernel_cache.lru = SG_MALLOC(int32_t, totdoc);
196  kernel_cache.invindex = SG_MALLOC(int32_t, totdoc);
197  kernel_cache.active2totdoc = SG_MALLOC(int32_t, totdoc);
198  kernel_cache.totdoc2active = SG_MALLOC(int32_t, totdoc);
199  kernel_cache.buffer = SG_MALLOC(KERNELCACHE_ELEM, buffer_size);
200  kernel_cache.buffsize=buffer_size;
201  kernel_cache.max_elems=(int32_t) (kernel_cache.buffsize/totdoc);
202 
203  if(kernel_cache.max_elems>totdoc) {
204  kernel_cache.max_elems=totdoc;
205  }
206 
207  kernel_cache.elems=0; // initialize cache
208  for(i=0;i<totdoc;i++) {
209  kernel_cache.index[i]=-1;
210  kernel_cache.lru[i]=0;
211  }
212  for(i=0;i<totdoc;i++) {
213  kernel_cache.occu[i]=0;
214  kernel_cache.invindex[i]=-1;
215  }
216 
217  kernel_cache.activenum=totdoc;; // NOTE(review): stray double semicolon in original
218  for(i=0;i<totdoc;i++) {
219  kernel_cache.active2totdoc[i]=i;
220  kernel_cache.totdoc2active[i]=i;
221  }
222 
223  kernel_cache.time=0;
224 }
225 
// NOTE(review): the signature line (original line 226, presumably
// `void CKernel::get_kernel_row(`) was dropped by the doc extraction.
// Fills `buffer` with row `docnum` of the kernel matrix, serving entries
// from the cache when present and computing the rest on the fly.
// Indices >= num_vectors are mirrored (2*n-1-i) — the SVMlight
// regression doubling trick.
227  int32_t docnum, int32_t *active2dnum, float64_t *buffer, bool full_line)
228 {
229  int32_t i,j;
230  KERNELCACHE_IDX start;
231 
232  int32_t num_vectors = get_num_vec_lhs();
233  if (docnum>=num_vectors)
234  docnum=2*num_vectors-1-docnum;
235 
236  /* is cached? */
237  if(kernel_cache.index[docnum] != -1)
238  {
239  kernel_cache.lru[kernel_cache.index[docnum]]=kernel_cache.time; /* lru */
240  start=((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[docnum];
241 
242  if (full_line)
243  {
244  for(j=0;j<get_num_vec_lhs();j++)
245  {
246  if(kernel_cache.totdoc2active[j] >= 0)
247  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
248  else
249  buffer[j]=(float64_t) kernel(docnum, j);
250  }
251  }
252  else
253  {
254  for(i=0;(j=active2dnum[i])>=0;i++)
255  {
256  if(kernel_cache.totdoc2active[j] >= 0)
257  buffer[j]=kernel_cache.buffer[start+kernel_cache.totdoc2active[j]];
258  else
259  {
260  int32_t k=j;
261  if (k>=num_vectors)
262  k=2*num_vectors-1-k;
263  buffer[j]=(float64_t) kernel(docnum, k);
264  }
265  }
266  }
267  }
268  else
269  {
270  if (full_line)
271  {
272  for(j=0;j<get_num_vec_lhs();j++)
273  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, j);
274  }
275  else
276  {
277  for(i=0;(j=active2dnum[i])>=0;i++)
278  {
279  int32_t k=j;
280  if (k>=num_vectors)
281  k=2*num_vectors-1-k;
282  buffer[j]=(KERNELCACHE_ELEM) kernel(docnum, k);
283  }
284  }
285  }
286 }
287 
288 
289 // Fills cache for the row m
// NOTE(review): the signature line (original line 290, presumably
// `void CKernel::cache_kernel_row(int32_t m)`) was dropped by the doc
// extraction. Fills the cache line for row m, copying symmetric entries
// from already-cached rows where possible.
291 {
292  register int32_t j,k,l;
293  register KERNELCACHE_ELEM *cache;
294 
295  int32_t num_vectors = get_num_vec_lhs();
296 
297  if (m>=num_vectors)
298  m=2*num_vectors-1-m;
299 
300  if(!kernel_cache_check(m)) // not cached yet
301  {
302  cache = kernel_cache_clean_and_malloc(m);
303  if(cache) {
304  l=kernel_cache.totdoc2active[m];
305 
306  for(j=0;j<kernel_cache.activenum;j++) // fill cache
307  {
308  k=kernel_cache.active2totdoc[j];
309 
310  if((kernel_cache.index[k] != -1) && (l != -1) && (k != m)) {
// exploit symmetry: K(m,k)=K(k,m), so reuse row k's cached column l
311  cache[j]=kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)
312  *kernel_cache.index[k]+l];
313  }
314  else
315  {
316  if (k>=num_vectors)
317  k=2*num_vectors-1-k;
318 
319  cache[j]=kernel(m, k);
320  }
321  }
322  }
323  else
324  perror("Error: Kernel cache full! => increase cache size");
325  }
326 }
327 
328 
// Worker entry point for parallel cache filling: computes the cache lines
// for the uncached rows in params->[start,end). Rows flagged in
// needs_computation are being written concurrently by other workers and
// must not be read from the cache — they are recomputed instead.
329 void* CKernel::cache_multiple_kernel_row_helper(void* p)
330 {
331  int32_t j,k,l;
332  S_KTHREAD_PARAM* params = (S_KTHREAD_PARAM*) p;
333 
334  for (int32_t i=params->start; i<params->end; i++)
335  {
336  KERNELCACHE_ELEM* cache=params->cache[i];
337  int32_t m = params->uncached_rows[i];
338  l=params->kernel_cache->totdoc2active[m];
339 
340  for(j=0;j<params->kernel_cache->activenum;j++) // fill cache
341  {
342  k=params->kernel_cache->active2totdoc[j];
343 
344  if((params->kernel_cache->index[k] != -1) && (l != -1) && (!params->needs_computation[k])) {
// symmetric reuse of row k's cached entry for column l
345  cache[j]=params->kernel_cache->buffer[((KERNELCACHE_IDX) params->kernel_cache->activenum)
346  *params->kernel_cache->index[k]+l];
347  }
348  else
349  {
350  if (k>=params->num_vectors)
351  k=2*params->num_vectors-1-k;
352 
353  cache[j]=params->kernel->kernel(m, k);
354  }
355  }
356 
357  //now line m is cached
358  params->needs_computation[m]=0;
359  }
360  return NULL;
361 }
362 
363 // Fills cache for the rows in key
// Fill the cache for all rows in `rows` (serially when <2 threads are
// configured or pthreads are unavailable). Cache lines are allocated up
// front on the main thread; the computation of [0,end) is split across
// nthreads-1 workers and the tail [end,num) is done on the calling thread.
364 void CKernel::cache_multiple_kernel_rows(int32_t* rows, int32_t num_rows)
365 {
366 #ifdef HAVE_PTHREAD
367  int32_t nthreads=parallel->get_num_threads();
368 
369  if (nthreads<2)
370  {
371 #endif
372  for(int32_t i=0;i<num_rows;i++)
373  cache_kernel_row(rows[i]);
374 #ifdef HAVE_PTHREAD
375  }
376  else
377  {
378  // fill up kernel cache
379  int32_t* uncached_rows = SG_MALLOC(int32_t, num_rows);
380  KERNELCACHE_ELEM** cache = SG_MALLOC(KERNELCACHE_ELEM*, num_rows);
381  pthread_t* threads = SG_MALLOC(pthread_t, nthreads-1);
382  S_KTHREAD_PARAM* params = SG_MALLOC(S_KTHREAD_PARAM, nthreads-1);
383  int32_t num_threads=nthreads-1;
384  int32_t num_vec=get_num_vec_lhs();
385  ASSERT(num_vec>0);
386  uint8_t* needs_computation=SG_CALLOC(uint8_t, num_vec);
387 
388  int32_t step=0;
389  int32_t num=0;
390  int32_t end=0;
391 
392  // allocate cachelines if necessary
393  for (int32_t i=0; i<num_rows; i++)
394  {
395  int32_t idx=rows[i];
396  if (idx>=num_vec)
397  idx=2*num_vec-1-idx;
398 
399  if (kernel_cache_check(idx))
400  continue;
401 
402  needs_computation[idx]=1;
403  uncached_rows[num]=idx;
404  cache[num]= kernel_cache_clean_and_malloc(idx);
405 
406  if (!cache[num])
407  SG_ERROR("Kernel cache full! => increase cache size\n");
408 
409  num++;
410  }
411 
412  if (num>0)
413  {
413b// NOTE(review): step divides by nthreads (workers + this thread), so the
413c// calling thread handles the remainder chunk via last_param below.
414  step= num/nthreads;
415 
416  if (step<1)
417  {
418  num_threads=num-1;
419  step=1;
420  }
421 
422  for (int32_t t=0; t<num_threads; t++)
423  {
424  params[t].kernel = this;
425  params[t].kernel_cache = &kernel_cache;
426  params[t].cache = cache;
427  params[t].uncached_rows = uncached_rows;
428  params[t].needs_computation = needs_computation;
429  params[t].num_uncached = num;
430  params[t].start = t*step;
431  params[t].end = (t+1)*step;
432  params[t].num_vectors = get_num_vec_lhs();
433  end=params[t].end;
434 
435  int code=pthread_create(&threads[t], NULL,
436  CKernel::cache_multiple_kernel_row_helper, (void*)&params[t]);
437 
438  if (code != 0)
439  {
440  SG_WARNING("Thread creation failed (thread %d of %d) "
441  "with error:'%s'\n",t, num_threads, strerror(code));
442  num_threads=t;
443  end=t*step;
444  break;
445  }
446  }
447  }
448  else
449  num_threads=-1;
450 
451 
452  S_KTHREAD_PARAM last_param;
453  last_param.kernel = this;
454  last_param.kernel_cache = &kernel_cache;
455  last_param.cache = cache;
456  last_param.uncached_rows = uncached_rows;
457  last_param.needs_computation = needs_computation;
458  last_param.start = end;
459  last_param.num_uncached = num;
460  last_param.end = num;
461  last_param.num_vectors = get_num_vec_lhs();
462 
463  cache_multiple_kernel_row_helper(&last_param);
464 
465 
466  for (int32_t t=0; t<num_threads; t++)
467  {
468  if (pthread_join(threads[t], NULL) != 0)
469  SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads);
470  }
471 
472  SG_FREE(needs_computation);
473  SG_FREE(params);
474  SG_FREE(threads);
475  SG_FREE(cache);
476  SG_FREE(uncached_rows);
477  }
478 #endif
479 }
480 
481 // remove numshrink columns in the cache
482 // which correspond to examples marked
// NOTE(review): the signature line (original line 483, presumably
// `void CKernel::kernel_cache_shrink(`) was dropped by the doc extraction.
// Removes up to numshrink columns (examples with after[j]==0) from every
// cached row by compacting the buffer in place, then rebuilds the
// active<->totdoc index maps and the per-row capacity.
484  int32_t totdoc, int32_t numshrink, int32_t *after)
485 {
486  register int32_t i,j,jj,scount; // 0 in after.
487  KERNELCACHE_IDX from=0,to=0;
488  int32_t *keep;
489 
490  keep=SG_MALLOC(int32_t, totdoc);
491  for(j=0;j<totdoc;j++) {
492  keep[j]=1;
493  }
494  scount=0;
495  for(jj=0;(jj<kernel_cache.activenum) && (scount<numshrink);jj++) {
496  j=kernel_cache.active2totdoc[jj];
497  if(!after[j]) {
498  scount++;
499  keep[j]=0;
500  }
501  }
502 
// compact: copy surviving columns forward over removed ones, row by row
503  for(i=0;i<kernel_cache.max_elems;i++) {
504  for(jj=0;jj<kernel_cache.activenum;jj++) {
505  j=kernel_cache.active2totdoc[jj];
506  if(!keep[j]) {
507  from++;
508  }
509  else {
510  kernel_cache.buffer[to]=kernel_cache.buffer[from];
511  to++;
512  from++;
513  }
514  }
515  }
516 
517  kernel_cache.activenum=0;
518  for(j=0;j<totdoc;j++) {
519  if((keep[j]) && (kernel_cache.totdoc2active[j] != -1)) {
520  kernel_cache.active2totdoc[kernel_cache.activenum]=j;
521  kernel_cache.totdoc2active[j]=kernel_cache.activenum;
522  kernel_cache.activenum++;
523  }
524  else {
525  kernel_cache.totdoc2active[j]=-1;
526  }
527  }
528 
// shorter rows => more rows fit into the same buffer
529  kernel_cache.max_elems=
530  (int32_t)(kernel_cache.buffsize/kernel_cache.activenum);
531  if(kernel_cache.max_elems>totdoc) {
532  kernel_cache.max_elems=totdoc;
533  }
534 
535  SG_FREE(keep);
536 
537 }
538 
// NOTE(review): signature line (original line 539, presumably
// `void CKernel::kernel_cache_reset_lru()`) was dropped by the doc
// extraction. Rebases all lru timestamps by subtracting the maximum,
// preventing counter overflow.
540 {
541  int32_t maxlru=0,k;
542 
543  for(k=0;k<kernel_cache.max_elems;k++) {
544  if(maxlru < kernel_cache.lru[k])
545  maxlru=kernel_cache.lru[k];
546  }
547  for(k=0;k<kernel_cache.max_elems;k++) {
548  kernel_cache.lru[k]-=maxlru;
549  }
550 }
551 
// NOTE(review): signature line (original line 552, presumably
// `void CKernel::kernel_cache_cleanup()`) was dropped by the doc
// extraction. Frees all cache arrays and zeroes the cache struct.
553 {
554  SG_FREE(kernel_cache.index);
555  SG_FREE(kernel_cache.occu);
556  SG_FREE(kernel_cache.lru);
557  SG_FREE(kernel_cache.invindex);
558  SG_FREE(kernel_cache.active2totdoc);
559  SG_FREE(kernel_cache.totdoc2active);
560  SG_FREE(kernel_cache.buffer);
561  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
562 }
563 
// Claim the first free cache slot; returns its index, or -1 when the
// cache is full.
564 int32_t CKernel::kernel_cache_malloc()
565 {
566  int32_t i;
567 
// NOTE(review): original line 568 is missing — almost certainly
// `if(kernel_cache.elems < kernel_cache.max_elems) {`, which the stray
// closing brace at line 576 below matches. Verify against source.
569  for(i=0;i<kernel_cache.max_elems;i++) {
570  if(!kernel_cache.occu[i]) {
571  kernel_cache.occu[i]=1;
572  kernel_cache.elems++;
573  return(i);
574  }
575  }
576  }
577  return(-1);
578 }
579 
580 void CKernel::kernel_cache_free(int32_t cacheidx)
581 {
582  kernel_cache.occu[cacheidx]=0;
583  kernel_cache.elems--;
584 }
585 
586 // remove least recently used cache
587 // element
588 int32_t CKernel::kernel_cache_free_lru()
589 {
590  register int32_t k,least_elem=-1,least_time;
591 
592  least_time=kernel_cache.time+1;
593  for(k=0;k<kernel_cache.max_elems;k++) {
594  if(kernel_cache.invindex[k] != -1) {
595  if(kernel_cache.lru[k]<least_time) {
596  least_time=kernel_cache.lru[k];
597  least_elem=k;
598  }
599  }
600  }
601 
602  if(least_elem != -1) {
603  kernel_cache_free(least_elem);
604  kernel_cache.index[kernel_cache.invindex[least_elem]]=-1;
605  kernel_cache.invindex[least_elem]=-1;
606  return(1);
607  }
608  return(0);
609 }
610 
611 // Get a free cache entry. In case cache is full, the lru
612 // element is removed.
// Get a cache line for document `cacheidx`. If the cache is full, the LRU
// line is evicted and allocation retried once; returns NULL (0) when even
// that fails.
613 KERNELCACHE_ELEM* CKernel::kernel_cache_clean_and_malloc(int32_t cacheidx)
614 {
615  int32_t result;
616  if((result = kernel_cache_malloc()) == -1) {
617  if(kernel_cache_free_lru()) {
618  result = kernel_cache_malloc();
619  }
620  }
// NOTE(review): index[cacheidx] is set to -1 on failure here — intentional,
// it marks the row as not cached.
621  kernel_cache.index[cacheidx]=result;
622  if(result == -1) {
623  return(0);
624  }
625  kernel_cache.invindex[result]=cacheidx;
626  kernel_cache.lru[kernel_cache.index[cacheidx]]=kernel_cache.time; // lru
627  return &kernel_cache.buffer[((KERNELCACHE_IDX) kernel_cache.activenum)*kernel_cache.index[cacheidx]];
628 }
629 #endif //USE_SVMLIGHT
630 
// NOTE(review): load() body (original lines 633-634) was dropped by the
// doc extraction — verify against repository source.
631 void CKernel::load(CFile* loader)
632 {
635 }
636 
// Serialize the full (dense float64) kernel matrix via the writer.
// NOTE(review): original lines 640 and 642 are missing — presumably
// SG_SET_LOCALE_C / SG_RESET_LOCALE guards around set_matrix; verify.
637 void CKernel::save(CFile* writer)
638 {
639  SGMatrix<float64_t> k_matrix=get_kernel_matrix<float64_t>();
641  writer->set_matrix(k_matrix.matrix, k_matrix.num_rows, k_matrix.num_cols);
643 }
644 
// NOTE(review): the signature lines of the three feature-removal methods
// were dropped by the doc extraction:
//   line 645: void CKernel::remove_lhs_and_rhs()
//   line 662: void CKernel::remove_lhs()   (approx.)
//   line 676: void CKernel::remove_rhs()   (approx.)
// Each drops references to the attached feature objects, taking care not
// to double-unref when lhs==rhs, and resets the cache.
646 {
647  if (rhs!=lhs)
648  SG_UNREF(rhs);
649  rhs = NULL;
650  num_rhs=0;
651 
652  SG_UNREF(lhs);
653  lhs = NULL;
654  num_lhs=0;
655  lhs_equals_rhs=false;
656 
657 #ifdef USE_SVMLIGHT
658  cache_reset();
659 #endif //USE_SVMLIGHT
660 }
661 
// remove_lhs: aliasing rhs is cleared (not unreffed) before lhs is released
663 {
664  if (rhs==lhs)
665  rhs=NULL;
666  SG_UNREF(lhs);
667  lhs = NULL;
668  num_lhs=0;
669  lhs_equals_rhs=false;
670 #ifdef USE_SVMLIGHT
671  cache_reset();
672 #endif //USE_SVMLIGHT
673 }
674 
// remove_rhs: only releases rhs when it is a distinct object from lhs
677 {
678  if (rhs!=lhs)
679  SG_UNREF(rhs);
680  rhs = NULL;
681  num_rhs=0;
682  lhs_equals_rhs=false;
683 
684 #ifdef USE_SVMLIGHT
685  cache_reset();
686 #endif //USE_SVMLIGHT
687 }
688 
689 #define ENUM_CASE(n) case n: SG_INFO(#n " "); break;
690 
// NOTE(review): signature line (original line 691, presumably
// `void CKernel::list_kernel()`) and the bulk of the ENUM_CASE entries
// (original lines 700-757, 762-779, 784-797) were dropped by the doc
// extraction — this body is heavily truncated; consult repository source.
692 {
693  SG_INFO( "%p - \"%s\" weight=%1.2f OPT:%s", this, get_name(),
695  get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" :
696  "SLOWBUTMEMEFFICIENT");
697 
698  switch (get_kernel_type())
699  {
758  }
759 
760  switch (get_feature_class())
761  {
772  ENUM_CASE(C_WD)
780  }
781 
782  switch (get_feature_type())
783  {
798  }
799  SG_INFO( "\n");
800 }
801 #undef ENUM_CASE
802 
// NOTE(review): this region holds the default (unsupported) linadd/batch
// implementations; most signature lines were dropped by the doc
// extraction (803 init_optimization, 810 delete_optimization,
// 816 compute_optimized, 822 compute_batch, 834 clear_normal,
// 839 get_num_subkernels, 844 compute_by_subkernel). Subclasses that
// support linadd override these; the base class just errors out.
804  int32_t count, int32_t *IDX, float64_t * weights)
805 {
806  SG_ERROR( "kernel does not support linadd optimization\n");
807  return false ;
808 }
809 
811 {
812  SG_ERROR( "kernel does not support linadd optimization\n");
813  return false;
814 }
815 
817 {
818  SG_ERROR( "kernel does not support linadd optimization\n");
819  return 0;
820 }
821 
823  int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec,
824  int32_t* IDX, float64_t* weights, float64_t factor)
825 {
826  SG_ERROR( "kernel does not support batch computation\n");
827 }
828 
829 void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
830 {
831  SG_ERROR( "kernel does not support linadd optimization, add_to_normal not implemented\n");
832 }
833 
835 {
836  SG_ERROR( "kernel does not support linadd optimization, clear_normal not implemented\n");
837 }
838 
// get_num_subkernels: a plain kernel counts as a single subkernel
840 {
841  return 1;
842 }
843 
845  int32_t vector_idx, float64_t * subkernel_contrib)
846 {
847  SG_ERROR( "kernel compute_by_subkernel not implemented\n");
848 }
849 
850 const float64_t* CKernel::get_subkernel_weights(int32_t &num_weights)
851 {
852  num_weights=1 ;
853  return &combined_kernel_weight ;
854 }
855 
// NOTE(review): signature line (original line 856, presumably
// `void CKernel::set_subkernel_weights(SGVector<float64_t> weights)`)
// was dropped by the doc extraction. The base class accepts exactly one
// weight and stores it as the combined kernel weight.
857 {
858  ASSERT(weights.vector);
859  if (weights.vlen!=1)
860  SG_ERROR( "number of subkernel weights should be one ...\n");
861 
862  combined_kernel_weight = weights.vector[0] ;
863 }
864 
// NOTE(review): signature line (original line 865, presumably
// `bool CKernel::init_optimization_svm(CSVM* svm)`) was dropped by the
// doc extraction. Extracts the SVM's support-vector indices and alphas
// into temporary arrays and forwards them to init_optimization().
866 {
867  int32_t num_suppvec=svm->get_num_support_vectors();
868  int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
869  float64_t* sv_weight=SG_MALLOC(float64_t, num_suppvec);
870 
871  for (int32_t i=0; i<num_suppvec; i++)
872  {
873  sv_idx[i] = svm->get_support_vector(i);
874  sv_weight[i] = svm->get_alpha(i);
875  }
876  bool ret = init_optimization(num_suppvec, sv_idx, sv_weight);
877 
878  SG_FREE(sv_idx);
879  SG_FREE(sv_weight);
880  return ret;
881 }
882 
// NOTE(review): the three (de)serialization hooks below lost their
// signature lines (883 load_serializable_post, 890 save_serializable_pre,
// 898 save_serializable_post) and one body line each (885, 892, 900 —
// presumably the CSGObject::...(); base-class calls) to the doc
// extraction. They restore/suppress the rhs==lhs aliasing because the
// shared pointer must not be serialized twice.
884 {
886  if (lhs_equals_rhs)
887  rhs=lhs;
888 }
889 
891 {
893 
894  if (lhs_equals_rhs)
895  rhs=NULL;
896 }
897 
899 {
901 
902  if (lhs_equals_rhs)
903  rhs=lhs;
904 }
905 
// NOTE(review): the signature line (original line 906, presumably
// `void CKernel::register_params()`) plus the argument lines 916/918
// (presumably `MS_NOT_AVAILABLE);`) were dropped by the doc extraction.
// Registers all serializable members with the parameter framework.
907  SG_ADD(&cache_size, "cache_size",
908  "Cache size in MB.", MS_NOT_AVAILABLE);
909  SG_ADD((CSGObject**) &lhs, "lhs",
910  "Feature vectors to occur on left hand side.", MS_NOT_AVAILABLE);
911  SG_ADD((CSGObject**) &rhs, "rhs",
912  "Feature vectors to occur on right hand side.", MS_NOT_AVAILABLE);
913  SG_ADD(&lhs_equals_rhs, "lhs_equals_rhs",
914  "If features on lhs are the same as on rhs.", MS_NOT_AVAILABLE);
915  SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
917  SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
919  SG_ADD(&combined_kernel_weight, "combined_kernel_weight",
920  "Combined kernel weight.", MS_AVAILABLE);
921  SG_ADD(&optimization_initialized, "optimization_initialized",
922  "Optimization is initialized.", MS_NOT_AVAILABLE);
923  SG_ADD((machine_int_t*) &opt_type, "opt_type",
924  "Optimization type.", MS_NOT_AVAILABLE);
925  SG_ADD(&properties, "properties", "Kernel properties.", MS_NOT_AVAILABLE);
926  SG_ADD((CSGObject**) &normalizer, "normalizer", "Normalize the kernel.",
927  MS_AVAILABLE);
928 }
929 
930 
// Reset all members to their defaults (shared by every constructor).
// NOTE(review): original lines 939-942 and 949 are missing — presumably
// initialization of combined_kernel_weight/optimization_initialized/
// opt_type/properties and a register_params() call; verify against source.
931 void CKernel::init()
932 {
933  cache_size=10;
934  kernel_matrix=NULL;
935  lhs=NULL;
936  rhs=NULL;
937  num_lhs=0;
938  num_rhs=0;
943  normalizer=NULL;
944 
945 #ifdef USE_SVMLIGHT
946  memset(&kernel_cache, 0x0, sizeof(KERNEL_CACHE));
947 #endif //USE_SVMLIGHT
948 
950 }
951 
// NOTE(review): the signature line (original line 952, presumably
// `SGMatrix<float64_t> CKernel::get_parameter_gradient(` or similar) was
// dropped by the doc extraction. Base-class default: returns an empty
// matrix, i.e. no gradient available.
953  CSGObject* obj, index_t index)
954 {
955  return SGMatrix<float64_t>();
956 }

SHOGUN Machine Learning Toolbox - Documentation