SHOGUN  6.1.3
CombinedKernel.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
12 #include <shogun/lib/common.h>
13 #include <shogun/io/SGIO.h>
14 #include <shogun/lib/Signal.h>
15 #include <shogun/base/Parallel.h>
17 #include <shogun/kernel/Kernel.h>
21 #include <string.h>
24 
25 using namespace shogun;
26 using namespace Eigen;
27 
// Constructor: creates a combined kernel with the given cache size; asw
// selects whether subkernel weights are appended per-subkernel rather than
// kept as one weight per kernel.
28 CCombinedKernel::CCombinedKernel(int32_t size, bool asw)
29 : CKernel(size), append_subkernel_weights(asw)
30 {
31  init();
32 
// NOTE(review): original line 33 was lost in extraction — presumably
// "if (append_subkernel_weights)" guarding the SG_INFO below; confirm upstream.
34  SG_INFO("(subkernel weights are appended)\n")
35 
36  SG_INFO("Combined kernel created (%p)\n", this)
37 }
38 
// Destructor body — the signature line (presumably
// CCombinedKernel::~CCombinedKernel()) was lost in extraction.
// Frees the cached flat weight buffer and tears down subkernel state.
40 {
41  SG_FREE(subkernel_weights_buffer);
// NOTE(review): original line 42 missing — likely nulls the buffer pointer.
43 
44  cleanup();
// NOTE(review): original line 45 missing — likely SG_UNREF(kernel_array).
46 
47  SG_INFO("Combined kernel deleted (%p).\n", this)
48 }
49 
// Body of (presumably) init_subkernel_weights() — signature line lost in
// extraction. Converts subkernel_log_weights from log domain to a normalized
// weight vector using the numerically stable log-sum-exp trick.
51 {
52  weight_update=true;
// NOTE(review): original lines 53-54 missing — likely declare the target
// SGVector "wt" and resize it; confirm against upstream shogun source.
55 
56  Map<VectorXd> eigen_wt(wt.vector, wt.vlen);
// NOTE(review): original line 57 missing — presumably maps
// subkernel_log_weights into "eigen_log_wt" used below.
58 
59  // log_sum_exp trick
60  float64_t max_coeff=eigen_log_wt.maxCoeff();
61  VectorXd tmp = eigen_log_wt.array() - max_coeff;
62  float64_t sum = CMath::log(tmp.array().exp().sum());
63  eigen_wt = tmp.array() - sum;
64  eigen_wt = eigen_wt.array().exp();
// NOTE(review): original line 65 missing — likely stores/applies "wt".
66 }
67 
// Initialize the combined kernel on left/right feature sets. If plain
// (non-combined) features are passed, they are wrapped into CCombinedFeatures
// (one entry per subkernel) and init() recurses. Otherwise each subkernel is
// initialized with its matching feature pair; CUSTOM kernels skip feature
// init but are validated against num_lhs/num_rhs.
68 bool CCombinedKernel::init(CFeatures* l, CFeatures* r)
69 {
// NOTE(review): original lines 70 and 72 missing — this brace pair likely
// guarded a call such as init_subkernel_weights(); confirm upstream.
71  {
73  }
74 
75  /* if the specified features are not combined features, but a single other
76  * feature type, assume that the caller wants to use all kernels on these */
77  if (l && r && l->get_feature_class()==r->get_feature_class() &&
// NOTE(review): original lines 78-79 (rest of the condition, presumably
// matching feature types and excluding C_COMBINED) missing.
80  {
81  SG_DEBUG("Initialising combined kernel's combined features with the "
82  "same instance from parameters\n");
83  /* construct combined features with each element being the parameter */
84  CCombinedFeatures* combined_l=new CCombinedFeatures();
85  CCombinedFeatures* combined_r=new CCombinedFeatures();
86  for (index_t i=0; i<get_num_subkernels(); ++i)
87  {
88  combined_l->append_feature_obj(l);
89  combined_r->append_feature_obj(r);
90  }
91 
92  /* recursive call with constructed combined kernel */
93  return init(combined_l, combined_r);
94  }
95 
96  CKernel::init(l,r);
97  REQUIRE(l->get_feature_class()==C_COMBINED, "%s::init(): LHS features are"
98  " of class %s but need to be combined features!\n",
99  get_name(), l->get_name());
100  REQUIRE(r->get_feature_class()==C_COMBINED, "%s::init(): RHS features are"
101  " of class %s but need to be combined features!\n",
102  get_name(), r->get_name());
105 
106  CFeatures* lf=NULL;
107  CFeatures* rf=NULL;
108  CKernel* k=NULL;
109 
110  bool result=true;
111  index_t f_idx = 0;
112 
113  SG_DEBUG("Starting for loop for kernels\n")
// Walk all subkernels; f_idx tracks the feature-object index separately so
// CUSTOM kernels (which consume no features) do not advance it.
114  for (index_t k_idx=0; k_idx<get_num_kernels() && result; k_idx++)
115  {
116  k = get_kernel(k_idx);
117 
118  if (!k)
119  SG_ERROR("Kernel at position %d is NULL\n", k_idx)
120 
121  // skip over features - the custom kernel does not need any
122  if (k->get_kernel_type() != K_CUSTOM)
123  {
124  if (((CCombinedFeatures*)l)->get_num_feature_obj() > f_idx &&
125  ((CCombinedFeatures*)r)->get_num_feature_obj() > f_idx)
126  {
127  lf = ((CCombinedFeatures*)l)->get_feature_obj(f_idx);
128  rf = ((CCombinedFeatures*)r)->get_feature_obj(f_idx);
129  }
130 
131  f_idx++;
132  if (!lf || !rf)
133  {
134  SG_UNREF(lf);
135  SG_UNREF(rf);
136  SG_UNREF(k);
137  SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")
138  }
139 
140  SG_DEBUG("Initializing 0x%p - \"%s\"\n", this, k->get_name())
141  result=k->init(lf,rf);
142  SG_UNREF(lf);
143  SG_UNREF(rf);
144 
145  if (!result)
146  break;
147  }
148  else
149  {
// CUSTOM kernel: no feature init, but its precomputed matrix must match the
// combined kernel's lhs/rhs vector counts.
150  SG_DEBUG("Initializing 0x%p - \"%s\" (skipping init, this is a CUSTOM kernel)\n", this, k->get_name())
151  if (!k->has_features())
152  SG_ERROR("No kernel matrix was assigned to this Custom kernel\n")
153  if (k->get_num_vec_lhs() != num_lhs)
154  SG_ERROR("Number of lhs-feature vectors (%d) not match with number of rows (%d) of custom kernel\n", num_lhs, k->get_num_vec_lhs())
155  if (k->get_num_vec_rhs() != num_rhs)
156  SG_ERROR("Number of rhs-feature vectors (%d) not match with number of cols (%d) of custom kernel\n", num_rhs, k->get_num_vec_rhs())
157  }
158 
159  SG_UNREF(k);
160  }
161 
162  if (!result)
163  {
164  SG_INFO("CombinedKernel: Initialising the following kernel failed\n")
165  if (k)
166  {
167  k->list_kernel();
168  SG_UNREF(k);
169  }
170  else
171  SG_INFO("<NULL>\n")
172  return false;
173  }
174 
175  if ( ((CCombinedFeatures*) l)->get_num_feature_obj()<=0 ||
176  ((CCombinedFeatures*) l)->get_num_feature_obj() != ((CCombinedFeatures*) r)->get_num_feature_obj() )
177  SG_ERROR("CombinedKernel: Number of features/kernels does not match - bailing out\n")
178 
179  init_normalizer();
180  initialized=true;
181  return true;
182 }
183 
// Body of (presumably) remove_lhs() — signature line lost in extraction.
// Detaches left-hand-side features from every non-CUSTOM subkernel and
// resets the lhs vector count.
185 {
// NOTE(review): original line 186 missing — possibly delete_optimization().
187 
188  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
189  {
190  CKernel* k = get_kernel(k_idx);
191  if (k->get_kernel_type() != K_CUSTOM)
192  k->remove_lhs();
193 
194  SG_UNREF(k);
195  }
// NOTE(review): original line 196 missing — likely CKernel::remove_lhs().
197 
198  num_lhs=0;
199 }
200 
// Body of (presumably) remove_rhs() — signature line lost in extraction.
// Detaches right-hand-side features from every non-CUSTOM subkernel and
// resets the rhs vector count.
202 {
// NOTE(review): original line 203 missing — possibly delete_optimization().
204 
205  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
206  {
207  CKernel* k = get_kernel(k_idx);
208  if (k->get_kernel_type() != K_CUSTOM)
209  k->remove_rhs();
210 
211  SG_UNREF(k);
212  }
// NOTE(review): original line 213 missing — likely CKernel::remove_rhs().
214 
215  num_rhs=0;
216 }
217 
// Body of (presumably) remove_lhs_and_rhs() — signature line lost in
// extraction. Detaches both feature sides from every non-CUSTOM subkernel
// and zeroes both vector counts.
219 {
// NOTE(review): original line 220 missing — possibly delete_optimization().
221 
222  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
223  {
224  CKernel* k = get_kernel(k_idx);
225  if (k->get_kernel_type() != K_CUSTOM)
226  k->remove_lhs_and_rhs();
227 
228  SG_UNREF(k);
229  }
230 
// NOTE(review): original line 231 missing — likely base-class cleanup call.
232 
233  num_lhs=0;
234  num_rhs=0;
235 }
236 
// Body of (presumably) cleanup() — signature line lost in extraction.
// Cleans up every subkernel (including CUSTOM ones) and resets state.
238 {
239  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
240  {
241  CKernel* k = get_kernel(k_idx);
242  k->cleanup();
243  SG_UNREF(k);
244  }
245 
// NOTE(review): original lines 246 and 248 missing — likely
// delete_optimization() and CKernel::cleanup(); confirm upstream.
247 
249 
250  num_lhs=0;
251  num_rhs=0;
252 }
253 
// Body of (presumably) list_kernels() — signature line lost in extraction.
// Logs this combined kernel followed by every contained subkernel.
255 {
256  SG_INFO("BEGIN COMBINED KERNEL LIST - ")
257  this->list_kernel();
258 
259  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
260  {
261  CKernel* k = get_kernel(k_idx);
262  k->list_kernel();
263  SG_UNREF(k);
264  }
265  SG_INFO("END COMBINED KERNEL LIST - ")
266 }
267 
268 float64_t CCombinedKernel::compute(int32_t x, int32_t y)
269 {
270  float64_t result=0;
271  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
272  {
273  CKernel* k = get_kernel(k_idx);
274  if (k->get_combined_kernel_weight()!=0)
275  result += k->get_combined_kernel_weight() * k->kernel(x,y);
276  SG_UNREF(k);
277  }
278 
279  return result;
280 }
281 
// init_optimization — the first signature line (presumably
// "bool CCombinedKernel::init_optimization(") was lost in extraction.
// Forwards LINADD optimization setup to each subkernel; if any subkernel is
// not optimizable (or its setup fails), the support-vector indices/weights
// are cached locally so compute_optimized() can fall back to direct
// evaluation for those kernels.
283  int32_t count, int32_t *IDX, float64_t *weights)
284 {
285  SG_DEBUG("initializing CCombinedKernel optimization\n")
286 
// NOTE(review): original line 287 missing — likely delete_optimization()
// to guarantee a clean start; confirm upstream.
288 
289  bool have_non_optimizable=false;
290 
291  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
292  {
293  CKernel* k = get_kernel(k_idx);
294 
295  bool ret=true;
296 
297  if (k && k->has_property(KP_LINADD))
298  ret=k->init_optimization(count, IDX, weights);
299  else
300  {
301  SG_WARNING("non-optimizable kernel 0x%X in kernel-list\n", k)
302  have_non_optimizable=true;
303  }
304 
305  if (!ret)
306  {
307  have_non_optimizable=true;
308  SG_WARNING("init_optimization of kernel 0x%X failed\n", k)
309  }
310 
311  SG_UNREF(k);
312  }
313 
314  if (have_non_optimizable)
315  {
316  SG_WARNING("some kernels in the kernel-list are not optimized\n")
317 
// Cache SV indices/weights for the non-optimized fallback path.
318  sv_idx=SG_MALLOC(int32_t, count);
319  sv_weight=SG_MALLOC(float64_t, count);
320  sv_count=count;
321  for (int32_t i=0; i<count; i++)
322  {
323  sv_idx[i]=IDX[i];
324  sv_weight[i]=weights[i];
325  }
326  }
327  set_is_initialized(true);
328 
329  return true;
330 }
331 
// Body of (presumably) delete_optimization() — signature line lost in
// extraction. Undoes init_optimization(): tears down per-subkernel LINADD
// state and frees the cached SV index/weight arrays.
333 {
334  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
335  {
336  CKernel* k = get_kernel(k_idx);
337  if (k->has_property(KP_LINADD))
338  k->delete_optimization();
339 
340  SG_UNREF(k);
341  }
342 
343  SG_FREE(sv_idx);
344  sv_idx = NULL;
345 
346  SG_FREE(sv_weight);
347  sv_weight = NULL;
348 
349  sv_count = 0;
350  set_is_initialized(false);
351 
352  return true;
353 }
354 
// compute_batch — the first signature line (presumably
// "void CCombinedKernel::compute_batch(") was lost in extraction.
// Accumulates, into result[], the weighted batch evaluations of all
// subkernels: batch-capable kernels compute directly, others go through
// emulate_compute_batch().
356  int32_t num_vec, int32_t* vec_idx, float64_t* result, int32_t num_suppvec,
357  int32_t* IDX, float64_t* weights, float64_t factor)
358 {
359  ASSERT(num_vec<=get_num_vec_rhs())
360  ASSERT(num_vec>0)
361  ASSERT(vec_idx)
362  ASSERT(result)
363 
364  //we have to do the optimization business ourselves but lets
365  //make sure we start cleanly
// NOTE(review): original line 366 missing — likely delete_optimization().
367 
368  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
369  {
370  CKernel* k = get_kernel(k_idx);
371  if (k && k->has_property(KP_BATCHEVALUATION))
372  {
373  if (k->get_combined_kernel_weight()!=0)
374  k->compute_batch(num_vec, vec_idx, result, num_suppvec, IDX, weights, k->get_combined_kernel_weight());
375  }
376  else
377  emulate_compute_batch(k, num_vec, vec_idx, result, num_suppvec, IDX, weights);
378 
379  SG_UNREF(k);
380  }
381 
382  //clean up
// NOTE(review): original line 383 missing — likely delete_optimization().
384 }
385 
// emulate_compute_batch — the first signature line (presumably
// "void CCombinedKernel::emulate_compute_batch(") was lost in extraction.
// Emulates batch evaluation for one subkernel: LINADD kernels use a
// temporary init_optimization/compute_optimized pass; all others fall back
// to an explicit double loop over support vectors.
387  CKernel* k, int32_t num_vec, int32_t* vec_idx, float64_t* result,
388  int32_t num_suppvec, int32_t* IDX, float64_t* weights)
389 {
390  ASSERT(k)
391  ASSERT(result)
392 
393  if (k->has_property(KP_LINADD))
394  {
395  if (k->get_combined_kernel_weight()!=0)
396  {
397  k->init_optimization(num_suppvec, IDX, weights);
398 
// Rows of result[] are independent, so the loop parallelizes safely.
399  #pragma omp parallel for
400  for (int32_t i=0; i<num_vec; ++i)
401  result[i] += k->get_combined_kernel_weight()*k->compute_optimized(vec_idx[i]);
402 
403  k->delete_optimization();
404  }
405  }
406  else
407  {
408  ASSERT(IDX!=NULL || num_suppvec==0)
409  ASSERT(weights!=NULL || num_suppvec==0)
410 
411  if (k->get_combined_kernel_weight()!=0)
412  { // compute the usual way for any non-optimized kernel
413  #pragma omp parallel for
414  for (int32_t i=0; i<num_vec; i++)
415  {
416  float64_t sub_result=0;
417  for (int32_t j=0; j<num_suppvec; j++)
418  sub_result += weights[j] * k->kernel(IDX[j], vec_idx[i]);
419 
420  result[i] += k->get_combined_kernel_weight()*sub_result;
421  }
422  }
423  }
424 }
425 
// Body of (presumably) compute_optimized(int32_t idx) — signature line lost
// in extraction. Sums each subkernel's contribution for one vector:
// LINADD-initialized kernels use their optimized path, others fall back to
// the cached SV indices/weights from init_optimization().
427 {
428  if (!get_is_initialized())
429  {
430  SG_ERROR("CCombinedKernel optimization not initialized\n")
431  return 0;
432  }
433 
434  float64_t result=0;
435 
436  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
437  {
438  CKernel* k = get_kernel(k_idx);
439  if (k->has_property(KP_LINADD) &&
440  k->get_is_initialized())
441  {
442  if (k->get_combined_kernel_weight()!=0)
443  {
444  result +=
// NOTE(review): original line 445 lost — almost certainly
// "k->get_combined_kernel_weight()*k->compute_optimized(idx);".
446  }
447  }
448  else
449  {
450  ASSERT(sv_idx!=NULL || sv_count==0)
451  ASSERT(sv_weight!=NULL || sv_count==0)
452 
453  if (k->get_combined_kernel_weight()!=0)
454  { // compute the usual way for any non-optimized kernel
455  float64_t sub_result=0;
456  for (int32_t j=0; j<sv_count; j++)
457  sub_result += sv_weight[j] * k->kernel(sv_idx[j], idx);
458 
459  result += k->get_combined_kernel_weight()*sub_result;
460  }
461  }
462 
463  SG_UNREF(k);
464  }
465 
466  return result;
467 }
468 
469 void CCombinedKernel::add_to_normal(int32_t idx, float64_t weight)
470 {
471  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
472  {
473  CKernel* k = get_kernel(k_idx);
474  k->add_to_normal(idx, weight);
475  SG_UNREF(k);
476  }
477  set_is_initialized(true) ;
478 }
479 
// Body of (presumably) clear_normal() — signature line lost in extraction.
// Clears every subkernel's normal vector, then marks the combined kernel
// initialized (matches add_to_normal()'s convention).
481 {
482  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
483  {
484  CKernel* k = get_kernel(k_idx);
485  k->clear_normal() ;
486  SG_UNREF(k);
487  }
488  set_is_initialized(true) ;
489 }
490 
// compute_by_subkernel — the first signature line (presumably
// "void CCombinedKernel::compute_by_subkernel(") was lost in extraction.
// Fills subkernel_contrib with each subkernel's optimized contribution for
// one vector; the two branches handle appended vs. per-kernel weights.
492  int32_t idx, float64_t * subkernel_contrib)
493 {
// NOTE(review): original line 494 missing — presumably
// "if (append_subkernel_weights)" selecting this branch; confirm upstream.
495  {
496  int32_t i=0 ;
497  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
498  {
499  CKernel* k = get_kernel(k_idx);
500  int32_t num = -1 ;
501  k->get_subkernel_weights(num);
502  if (num>1)
503  k->compute_by_subkernel(idx, &subkernel_contrib[i]) ;
504  else
505  subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
506 
507  SG_UNREF(k);
508  i += num ;
509  }
510  }
511  else
512  {
// One contribution slot per kernel when weights are not appended.
513  int32_t i=0 ;
514  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
515  {
516  CKernel* k = get_kernel(k_idx);
517  if (k->get_combined_kernel_weight()!=0)
518  subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
519 
520  SG_UNREF(k);
521  i++ ;
522  }
523  }
524 }
525 
// Body of (presumably) get_subkernel_weights(int32_t& num_weights) —
// signature line lost in extraction. Flattens all subkernel weights into
// the internally owned subkernel_weights_buffer and returns it; the buffer
// is freed and reallocated on every call.
527 {
528  SG_DEBUG("entering CCombinedKernel::get_subkernel_weights()\n")
529 
530  num_weights = get_num_subkernels() ;
531  SG_FREE(subkernel_weights_buffer);
532  subkernel_weights_buffer = SG_MALLOC(float64_t, num_weights);
533 
// NOTE(review): original line 534 missing — presumably
// "if (append_subkernel_weights)"; confirm upstream.
535  {
536  SG_DEBUG("appending kernel weights\n")
537 
538  int32_t i=0 ;
539  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
540  {
541  CKernel* k = get_kernel(k_idx);
542  int32_t num = -1 ;
543  const float64_t *w = k->get_subkernel_weights(num);
544  ASSERT(num==k->get_num_subkernels())
545  for (int32_t j=0; j<num; j++)
546  subkernel_weights_buffer[i+j]=w[j] ;
547 
548  SG_UNREF(k);
549  i += num ;
550  }
551  }
552  else
553  {
554  SG_DEBUG("not appending kernel weights\n")
555  int32_t i=0 ;
556  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
557  {
558  CKernel* k = get_kernel(k_idx);
// NOTE(review): original line 559 lost — almost certainly
// "subkernel_weights_buffer[i] = k->get_combined_kernel_weight();".
560 
561  SG_UNREF(k);
562  i++ ;
563  }
564  }
565 
566  SG_DEBUG("leaving CCombinedKernel::get_subkernel_weights()\n")
567  return subkernel_weights_buffer ;
568 }
569 
// Body of (presumably) get_subkernel_weights() returning SGVector —
// signature line lost in extraction. Copies the flat weight buffer into a
// freshly allocated SGVector so the caller owns independent storage.
571 {
// NOTE(review): original lines 572-576 missing — likely a guard (e.g. on
// enable_subkernel_weight_opt/weight_update) with an early path.
573  {
576  }
577 
578  int32_t num=0;
579  const float64_t* w=get_subkernel_weights(num);
580 
581  float64_t* weights = SG_MALLOC(float64_t, num);
582  for (int32_t i=0; i<num; i++)
583  weights[i] = w[i];
584 
585 
586  return SGVector<float64_t>(weights, num);
587 }
588 
// Body of (presumably) set_subkernel_weights(SGVector<float64_t> weights) —
// signature line lost in extraction. Distributes a flat weight vector back
// onto the subkernels; appended mode hands each kernel its slice, otherwise
// one combined weight per kernel is set.
590 {
// NOTE(review): original line 591 missing — presumably
// "if (append_subkernel_weights)"; confirm upstream.
592  {
593  int32_t i=0 ;
594  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
595  {
596  CKernel* k = get_kernel(k_idx);
597  int32_t num = k->get_num_subkernels() ;
598  ASSERT(i<weights.vlen)
// Non-owning SGVector view into the caller's buffer (refcount=false).
599  k->set_subkernel_weights(SGVector<float64_t>(&weights.vector[i],num, false));
600 
601  SG_UNREF(k);
602  i += num ;
603  }
604  }
605  else
606  {
607  int32_t i=0 ;
608  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
609  {
610  CKernel* k = get_kernel(k_idx);
611  ASSERT(i<weights.vlen)
612  k->set_combined_kernel_weight(weights.vector[i]);
613 
614  SG_UNREF(k);
615  i++ ;
616  }
617  }
618 }
619 
// Body of (presumably) set_optimization_type(EOptimizationType t) —
// signature line lost in extraction. Propagates the optimization type to
// every subkernel.
621 {
622  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
623  {
624  CKernel* k = get_kernel(k_idx);
625  k->set_optimization_type(t);
626 
627  SG_UNREF(k);
628  }
629 
// NOTE(review): original line 630 missing — likely
// CKernel::set_optimization_type(t).
631 }
632 
// Body of (presumably) precompute_subkernels() — signature line lost in
// extraction. Replaces every subkernel with a CCustomKernel holding its
// precomputed kernel matrix; returns false when there is nothing to do.
634 {
635  if (get_num_kernels()==0)
636  return false;
637 
638  CDynamicObjectArray* new_kernel_array = new CDynamicObjectArray();
639 
640  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
641  {
642  CKernel* k = get_kernel(k_idx);
643  new_kernel_array->append_element(new CCustomKernel(k));
644 
645  SG_UNREF(k);
646  }
647 
// NOTE(review): original lines 648/650 missing — likely SG_UNREF of the old
// kernel_array and SG_REF of the new one; confirm upstream.
649  kernel_array=new_kernel_array;
651 
652  return true;
653 }
654 
// Private init(): sets default member state and registers all serializable
// parameters (kernel array, SV cache, weight flags, log-domain weights).
655 void CCombinedKernel::init()
656 {
657  sv_count=0;
658  sv_idx=NULL;
659  sv_weight=NULL;
// NOTE(review): original line 660 missing — likely nulls
// subkernel_weights_buffer.
661  initialized=false;
662 
// NOTE(review): original lines 663-665 missing — likely allocate kernel_array
// and set properties; confirm upstream.
666 
667  SG_ADD((CSGObject**) &kernel_array, "kernel_array", "Array of kernels.",
668  MS_AVAILABLE);
669  m_parameters->add_vector(&sv_idx, &sv_count, "sv_idx",
670  "Support vector index.");
671  m_parameters->add_vector(&sv_weight, &sv_count, "sv_weight",
672  "Support vector weights.");
673  SG_ADD(&append_subkernel_weights, "append_subkernel_weights",
674  "If subkernel weights are appended.", MS_AVAILABLE);
675  SG_ADD(&initialized, "initialized", "Whether kernel is ready to be used.",
// NOTE(review): original line 676 (MS_* argument) and 678-679 (likely
// allocation of subkernel_log_weights) missing.
677 
// Single weight initialized to 0 in log domain, i.e. weight 1.
680  subkernel_log_weights[0] = 0;
681  SG_ADD(&subkernel_log_weights, "subkernel_log_weights",
682  "subkernel weights", MS_AVAILABLE, GRADIENT_AVAILABLE);
683  SG_ADD(&enable_subkernel_weight_opt, "enable_subkernel_weight_opt",
684  "enable subkernel weight opt", MS_NOT_AVAILABLE);
685 
686  weight_update = false;
687  SG_ADD(&weight_update, "weight_update",
688  "weight update", MS_NOT_AVAILABLE);
689 }
690 
// Body of (presumably) enable_subkernel_weight_learning() — signature line
// lost in extraction. Switches the stored subkernel weights into log domain
// so they can be optimized by gradient methods.
692 {
693  weight_update = false;
// NOTE(review): original lines 694-697 missing — likely set
// enable_subkernel_weight_opt and fetch current weights; confirm upstream.
698  for(index_t idx=0; idx<subkernel_log_weights.vlen; idx++)
699  {
700  ASSERT(subkernel_log_weights[idx]>0);//weight should be positive
701  subkernel_log_weights[idx]=CMath::log(subkernel_log_weights[idx]);//in log domain
702  }
703 }
704 
// get_parameter_gradient — the first signature line (presumably
// "SGMatrix<float64_t> CCombinedKernel::get_parameter_gradient(") was lost
// in extraction. Returns the gradient matrix of the kernel w.r.t. the named
// parameter: combined weights, the log-domain subkernel weights (with
// softmax-style chain rule), or a parameter delegated to the subkernels.
706  const TParameter* param, index_t index)
707 {
708  SGMatrix<float64_t> result;
709 
710  if (!strcmp(param->m_name, "combined_kernel_weight"))
711  {
// NOTE(review): original line 712 missing — presumably
// "if (append_subkernel_weights)"; confirm upstream.
713  {
714  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
715  {
716  CKernel* k=get_kernel(k_idx);
717  result=k->get_parameter_gradient(param, index);
718 
719  SG_UNREF(k);
720 
721  if (result.num_cols*result.num_rows>0)
722  return result;
723  }
724  }
725  else
726  {
// Gradient w.r.t. a single combined weight is the first subkernel's matrix.
727  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
728  {
729  CKernel* k=get_kernel(k_idx);
730  result=k->get_kernel_matrix();
731 
732  SG_UNREF(k);
733 
734  return result;
735  }
736  }
737  }
738  else
739  {
740  if (!strcmp(param->m_name, "subkernel_log_weights"))
741  {
// NOTE(review): original line 742 missing — likely a check on
// enable_subkernel_weight_opt; confirm upstream.
743  {
744  ASSERT(index>=0 && index<subkernel_log_weights.vlen);
745  CKernel* k=get_kernel(index);
746  result=k->get_kernel_matrix();
747  SG_UNREF(k);
748  if (weight_update)
749  weight_update = false;
750  float64_t factor = 1.0;
// NOTE(review): original line 751 missing — presumably maps
// subkernel_log_weights into "eigen_log_wt" used below.
752  // log_sum_exp trick
753  float64_t max_coeff = eigen_log_wt.maxCoeff();
754  VectorXd tmp = eigen_log_wt.array() - max_coeff;
755  float64_t log_sum = CMath::log(tmp.array().exp().sum());
756 
// d softmax_i / d log w_i = p_i - p_i^2, computed stably in log domain.
757  factor = subkernel_log_weights[index] - max_coeff - log_sum;
758  factor = CMath::exp(factor) - CMath::exp(factor*2.0);
759 
760  Map<MatrixXd> eigen_res(result.matrix, result.num_rows, result.num_cols);
761  eigen_res = eigen_res * factor;
762  }
763  else
764  {
// Weight learning disabled: gradient is a zero matrix of matching size.
765  CKernel* k=get_kernel(0);
766  result=k->get_kernel_matrix();
767  SG_UNREF(k);
768  result.zero();
769  }
770  return result;
771  }
772  else
773  {
// Parameter belongs to a subkernel: sum the weighted per-kernel gradients.
774  float64_t coeff;
775  for (index_t k_idx=0; k_idx<get_num_kernels(); k_idx++)
776  {
777  CKernel* k=get_kernel(k_idx);
778  SGMatrix<float64_t> derivative=
779  k->get_parameter_gradient(param, index);
780 
781  coeff=1.0;
782 
// NOTE(review): original line 783 missing — presumably
// "if (!append_subkernel_weights)"; confirm upstream.
784  coeff=k->get_combined_kernel_weight();
785 
786  for (index_t g=0; g<derivative.num_rows; g++)
787  {
788  for (index_t h=0; h<derivative.num_cols; h++)
789  derivative(g,h)*=coeff;
790  }
791 
792  if (derivative.num_cols*derivative.num_rows>0)
793  {
794  if (result.num_cols==0 && result.num_rows==0)
795  result=derivative;
796  else
797  {
798  for (index_t g=0; g<derivative.num_rows; g++)
799  {
800  for (index_t h=0; h<derivative.num_cols; h++)
801  result(g,h)+=derivative(g,h);
802  }
803  }
804  }
805 
806  SG_UNREF(k);
807  }
808  }
809  }
810 
811  return result;
812 }
813 
// Body of (presumably) static obtain_from_generic(CKernel* kernel) —
// signature line lost in extraction. Safely downcasts a CKernel to
// CCombinedKernel, erroring on any other kernel type.
815 {
816  if (kernel->get_kernel_type()!=K_COMBINED)
817  {
818  SG_SERROR("CCombinedKernel::obtain_from_generic(): provided kernel is "
819  "not of type CCombinedKernel!\n");
820  }
821 
822  /* since an additional reference is returned */
823  SG_REF(kernel);
824  return (CCombinedKernel*)kernel;
825 }
826 
// Body of (presumably) static combine_kernels(CList* kernel_list) —
// signature line lost in extraction. Builds the cartesian product of the
// given list-of-kernel-lists: one CCombinedKernel per combination, filled
// column-wise (first sub-list cycles fastest, later sub-lists repeat each
// kernel "freq" consecutive times).
828 {
829  CList* return_list = new CList(true);
830  SG_REF(return_list);
831 
832  if (!kernel_list)
833  return return_list;
834 
835  if (kernel_list->get_num_elements()==0)
836  return return_list;
837 
838  int32_t num_combinations = 1;
839  int32_t list_index = 0;
840 
841  /* calculation of total combinations */
842  CSGObject* list = kernel_list->get_first_element();
843  while (list)
844  {
845  CList* c_list= dynamic_cast<CList* >(list);
846  if (!c_list)
847  {
848  SG_SERROR("CCombinedKernel::combine_kernels() : Failed to cast list of type "
849  "%s to type CList\n", list->get_name());
850  }
851 
852  if (c_list->get_num_elements()==0)
853  {
854  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position %d "
855  "is empty.\n", list_index);
856  }
857 
858  num_combinations *= c_list->get_num_elements();
859 
860  if (kernel_list->get_delete_data())
861  SG_UNREF(list);
862 
863  list = kernel_list->get_next_element();
864  ++list_index;
865  }
866 
867  /* creation of CCombinedKernels */
868  CDynamicObjectArray kernel_array(num_combinations);
869  for (index_t i=0; i<num_combinations; ++i)
870  {
871  CCombinedKernel* c_kernel = new CCombinedKernel();
872  return_list->append_element(c_kernel);
873  kernel_array.push_back(c_kernel);
874  }
875 
876  /* first pass */
877  list = kernel_list->get_first_element();
878  CList* c_list = dynamic_cast<CList* >(list);
879 
880  /* kernel index in the list */
881  index_t kernel_index = 0;
882 
883  /* here we duplicate the first list in the following form
884  * a,b,c,d, a,b,c,d ...... a,b,c,d ---- for a total of num_combinations elements
885  */
// All kernels inside one sub-list must share a type, so each slot of every
// combination holds the same kernel family.
886  EKernelType prev_kernel_type = K_UNKNOWN;
887  bool first_kernel = true;
888  for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
889  {
890  CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
891 
892  if (first_kernel)
893  first_kernel = false;
894  else if (c_kernel->get_kernel_type()!=prev_kernel_type)
895  {
896  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
897  "0 contains different types of kernels\n");
898  }
899 
900  prev_kernel_type = c_kernel->get_kernel_type();
901 
902  for (index_t index=kernel_index; index<num_combinations; index+=c_list->get_num_elements())
903  {
904  CCombinedKernel* comb_kernel =
905  dynamic_cast<CCombinedKernel* >(kernel_array.get_element(index));
906  comb_kernel->append_kernel(c_kernel);
907  SG_UNREF(comb_kernel);
908  }
909  ++kernel_index;
910  if (c_list->get_delete_data())
911  SG_UNREF(kernel);
912  }
913 
914  if (kernel_list->get_delete_data())
915  SG_UNREF(list);
916 
917  /* how often each kernel of the sub-list must appear */
918  int32_t freq = c_list->get_num_elements();
919 
920  /* in this loop we replicate each kernel freq times
921  * until we assign to all the CombinedKernels a sub-kernel from this list
922  * That is for num_combinations */
923  list = kernel_list->get_next_element();
924  list_index = 1;
925  while (list)
926  {
927  c_list = dynamic_cast<CList* >(list);
928 
929  /* index of kernel in the list */
930  kernel_index = 0;
931  first_kernel = true;
932  for (CSGObject* kernel=c_list->get_first_element(); kernel; kernel=c_list->get_next_element())
933  {
934  CKernel* c_kernel = dynamic_cast<CKernel* >(kernel);
935 
936  if (first_kernel)
937  first_kernel = false;
938  else if (c_kernel->get_kernel_type()!=prev_kernel_type)
939  {
940  SG_SERROR("CCombinedKernel::combine_kernels() : Sub-list in position "
941  "%d contains different types of kernels\n", list_index);
942  }
943 
944  prev_kernel_type = c_kernel->get_kernel_type();
945 
946  /* moves the index so that we keep filling in, the way we do, until we reach the end of the list of combinedkernels */
947  for (index_t base=kernel_index*freq; base<num_combinations; base+=c_list->get_num_elements()*freq)
948  {
949  /* inserts freq consecutives times the current kernel */
950  for (index_t index=0; index<freq; ++index)
951  {
952  CCombinedKernel* comb_kernel =
953  dynamic_cast<CCombinedKernel* >(kernel_array.get_element(base+index));
954  comb_kernel->append_kernel(c_kernel);
955  SG_UNREF(comb_kernel);
956  }
957  }
958  ++kernel_index;
959 
960  if (c_list->get_delete_data())
961  SG_UNREF(kernel);
962  }
963 
// Each processed sub-list multiplies the repetition factor for the next one.
964  freq *= c_list->get_num_elements();
965  if (kernel_list->get_delete_data())
966  SG_UNREF(list);
967  list = kernel_list->get_next_element();
968  ++list_index;
969  }
970 
971  return return_list;
972 }
virtual void clear_normal()
Definition: Kernel.cpp:835
virtual const char * get_name() const =0
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:97
virtual void compute_by_subkernel(int32_t idx, float64_t *subkernel_contrib)
#define SG_INFO(...)
Definition: SGIO.h:117
virtual void cleanup()
Definition: Kernel.cpp:172
virtual const char * get_name() const
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
CSGObject * get_next_element()
Definition: List.h:185
SGVector< float64_t > subkernel_log_weights
virtual void compute_by_subkernel(int32_t vector_idx, float64_t *subkernel_contrib)
Definition: Kernel.cpp:845
virtual void set_subkernel_weights(SGVector< float64_t > weights)
int32_t index_t
Definition: common.h:72
int32_t num_rhs
number of feature vectors on right hand side
The Custom Kernel allows for custom user provided kernel matrices.
Definition: CustomKernel.h:36
virtual bool init(CFeatures *lhs, CFeatures *rhs)
SGMatrix< float64_t > get_parameter_gradient(const TParameter *param, index_t index=-1)
bool append_kernel(CKernel *k)
Definition: SGMatrix.h:25
parameter struct
#define SG_ERROR(...)
Definition: SGIO.h:128
#define REQUIRE(x,...)
Definition: SGIO.h:181
void set_is_initialized(bool p_init)
virtual bool delete_optimization()
Definition: Kernel.cpp:811
CDynamicObjectArray * kernel_array
Parameter * m_parameters
Definition: SGObject.h:609
float64_t kernel(int32_t idx_a, int32_t idx_b)
virtual void set_optimization_type(EOptimizationType t)
bool get_delete_data()
Definition: List.h:575
virtual void set_optimization_type(EOptimizationType t)
virtual void remove_rhs()
takes all necessary steps if the rhs is removed from kernel
Definition: Kernel.cpp:669
virtual int32_t get_num_vec_lhs()
SGMatrix< float64_t > get_kernel_matrix()
#define SG_REF(x)
Definition: SGObject.h:52
bool get_is_initialized()
virtual void remove_lhs_and_rhs()
Definition: Kernel.cpp:636
bool has_property(EKernelProperty p)
CSGObject * get_first_element()
Definition: List.h:151
virtual void remove_lhs_and_rhs()
virtual SGVector< float64_t > get_subkernel_weights()
#define ASSERT(x)
Definition: SGIO.h:176
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:124
CKernel * get_kernel(int32_t idx)
double float64_t
Definition: common.h:60
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
void set_combined_kernel_weight(float64_t nw)
virtual float64_t compute(int32_t x, int32_t y)
virtual float64_t compute_optimized(int32_t vector_idx)
Definition: Kernel.cpp:817
index_t num_rows
Definition: SGMatrix.h:495
void list_kernel()
Definition: Kernel.cpp:684
float64_t get_combined_kernel_weight()
static CList * combine_kernels(CList *kernel_list)
virtual const float64_t * get_subkernel_weights(int32_t &num_weights)
Definition: Kernel.cpp:851
virtual EFeatureClass get_feature_class() const =0
int32_t get_num_elements()
Definition: List.h:145
The Combined kernel is used to combine a number of kernels into a single CombinedKernel object by lin...
Dynamic array class for CSGObject pointers that creates an array that can be used like a list or an a...
index_t num_cols
Definition: SGMatrix.h:497
int32_t num_lhs
number of feature vectors on left hand side
virtual int32_t get_num_vec_rhs()
virtual void set_subkernel_weights(SGVector< float64_t > weights)
Definition: Kernel.cpp:864
virtual bool init_normalizer()
Definition: Kernel.cpp:167
#define SG_UNREF(x)
Definition: SGObject.h:53
void add_vector(bool **param, index_t *length, const char *name, const char *description="")
Definition: Parameter.cpp:335
#define SG_DEBUG(...)
Definition: SGIO.h:106
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
Definition: Kernel.cpp:823
T sum(const Container< T > &a, bool no_diag=false)
static CCombinedKernel * obtain_from_generic(CKernel *kernel)
EOptimizationType
Definition: kernel/Kernel.h:49
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
Definition: Kernel.cpp:804
virtual bool delete_optimization()
The class Features is the base class of all feature objects.
Definition: Features.h:69
bool append_element(CSGObject *data)
Definition: List.h:331
#define SG_SERROR(...)
Definition: SGIO.h:164
static float64_t exp(float64_t x)
Definition: Math.h:551
virtual SGMatrix< float64_t > get_parameter_gradient(const TParameter *param, index_t index=-1)
virtual void add_to_normal(int32_t idx, float64_t weight)
void emulate_compute_batch(CKernel *k, int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *weights)
static float64_t log(float64_t v)
Definition: Math.h:714
virtual void remove_lhs()
Definition: Kernel.cpp:655
virtual int32_t get_num_subkernels()
Definition: Kernel.cpp:840
virtual float64_t compute_optimized(int32_t idx)
static float base
Definition: JLCoverTree.h:89
The Kernel base class.
CSGObject * get_element(int32_t index) const
float64_t * subkernel_weights_buffer
#define SG_WARNING(...)
Definition: SGIO.h:127
#define SG_ADD(...)
Definition: SGObject.h:93
virtual bool has_features()
The class CombinedFeatures is used to combine a number of of feature objects into a single CombinedFe...
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
Definition: Kernel.cpp:830
virtual EFeatureType get_feature_type() const =0
Class List implements a doubly connected list for low-level-objects.
Definition: List.h:84
bool append_feature_obj(CFeatures *obj)
index_t vlen
Definition: SGVector.h:571
CCombinedKernel(int32_t size=10, bool append_subkernel_weights=false)
bool append_element(CSGObject *e)
virtual void init_subkernel_weights()
virtual void enable_subkernel_weight_learning()

SHOGUN Machine Learning Toolbox - Documentation