LaplacianInferenceMethod.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) 2012 Jacob Walker
 *
 * Code adapted from the Gaussian Process Machine Learning Toolbox
 * http://www.gaussianprocess.org/gpml/code/matlab/doc/
 * This code is specifically adapted from infLaplace.m
 */
#include <shogun/lib/config.h>

#ifdef HAVE_LAPACK
#ifdef HAVE_EIGEN3

/* The original listing elides the remaining includes; the list below is a
 * reconstruction of the headers the code plainly requires, not the verbatim
 * include list of the source file. */
#include <shogun/regression/gp/LaplacianInferenceMethod.h>
#include <shogun/regression/gp/StudentsTLikelihood.h>
#include <shogun/labels/RegressionLabels.h>
#include <shogun/features/CombinedFeatures.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/eigen3.h>
#include <shogun/lib/external/brent.h>

using namespace shogun;
using namespace Eigen;

namespace shogun
{
	/* Wrapper class used for the Brent minimizer. */
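	/* A sketch of what operator() below computes (inferred from the code,
	 * not stated in the original): for a step size x along the Newton
	 * direction dalpha it sets
	 *
	 *     alpha = start_alpha + x*dalpha,   f = K*alpha*scale^2 + m,
	 *
	 * and returns the objective Psi(alpha) = alpha'*(f - m)/2 - log p(y|f),
	 * which Brent's method minimizes over x. As side effects it refreshes
	 * the first log-likelihood derivative dl1 and the negated second
	 * derivative mW at the new f. */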
	class Psi_line : public func_base
	{
	public:
		Eigen::Map<Eigen::VectorXd>* alpha;
		Eigen::VectorXd* dalpha;
		Eigen::Map<Eigen::MatrixXd>* K;
		float64_t* l1;
		SGVector<float64_t>* dl1;
		Eigen::Map<Eigen::VectorXd>* dl2;
		SGVector<float64_t>* f;
		SGVector<float64_t>* m;
		SGVector<float64_t>* mW;
		CLikelihoodModel* lik;
		CRegressionLabels* lab;
		float64_t scale;

		Eigen::VectorXd start_alpha;

		virtual double operator() (double x)
		{
			Eigen::Map<Eigen::VectorXd> eigen_f(f->vector, f->vlen);
			Eigen::Map<Eigen::VectorXd> eigen_m(m->vector, m->vlen);

			*alpha = start_alpha + x*(*dalpha);
			eigen_f = (*K)*(*alpha)*scale*scale + eigen_m;

			for (index_t i = 0; i < eigen_f.rows(); i++)
				(*f)[i] = eigen_f[i];

			(*dl1) = lik->get_log_probability_derivative_f(lab, (*f), 1);
			(*mW) = lik->get_log_probability_derivative_f(lab, (*f), 2);

			float64_t result = (*alpha).dot(eigen_f - eigen_m)/2.0;

			for (index_t i = 0; i < (*mW).vlen; i++)
				(*mW)[i] = -(*mW)[i];

			result -= lik->get_log_probability_f(lab, *f);

			return result;
		}
	};
}

CLaplacianInferenceMethod::CLaplacianInferenceMethod() : CInferenceMethod()
{
	init();
	update_all();
}

CLaplacianInferenceMethod::CLaplacianInferenceMethod(CKernel* kern,
		CFeatures* feat,
		CMeanFunction* m, CLabels* lab, CLikelihoodModel* mod) :
		CInferenceMethod(kern, feat, m, lab, mod)
{
	init();
	update_all();
}

void CLaplacianInferenceMethod::init()
{
	m_latent_features = NULL;
	m_max_itr = 30;
	m_opt_tolerance = 1e-6;
	m_tolerance = 1e-8;
	m_max = 5;
}

CLaplacianInferenceMethod::~CLaplacianInferenceMethod()
{
}
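
/* Refreshes the cached quantities in dependency order: the label vector and
 * feature matrix first, then the training kernel matrix, and finally the
 * posterior mode (update_alpha) and its factorization (update_chol). */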
void CLaplacianInferenceMethod::update_all()
{
	if (m_labels)
		m_label_vector =
				((CRegressionLabels*) m_labels)->get_labels().clone();

	if (m_features && m_features->has_property(FP_DOT) &&
			m_features->get_num_vectors())
	{
		m_feature_matrix =
				((CDotFeatures*)m_features)->get_computed_dot_feature_matrix();
	}

	else if (m_features && m_features->get_feature_class() == C_COMBINED)
	{
		CDotFeatures* feat =
				(CDotFeatures*)((CCombinedFeatures*)m_features)->
				get_first_feature_obj();

		if (feat->get_num_vectors())
			m_feature_matrix = feat->get_computed_dot_feature_matrix();

		SG_UNREF(feat);
	}

	if (m_kernel)
		update_train_kernel();

	if (m_ktrtr.num_cols*m_ktrtr.num_rows)
	{
		update_alpha();
		update_chol();
	}
}

void CLaplacianInferenceMethod::check_members()
{
	if (!m_labels)
		SG_ERROR("No labels set\n");

	if (m_labels->get_label_type() != LT_REGRESSION)
		SG_ERROR("Expected RegressionLabels\n");

	if (!m_features)
		SG_ERROR("No features set!\n");

	if (m_labels->get_num_labels() != m_features->get_num_vectors())
		SG_ERROR("Number of training vectors does not match number of labels\n");

	if (m_features->get_feature_class() == C_COMBINED)
	{
		CDotFeatures* feat =
				(CDotFeatures*)((CCombinedFeatures*)m_features)->
				get_first_feature_obj();

		if (!feat->has_property(FP_DOT))
			SG_ERROR("Specified features are not of type CDotFeatures\n");

		if (feat->get_feature_class() != C_DENSE)
			SG_ERROR("Expected Simple Features\n");

		if (feat->get_feature_type() != F_DREAL)
			SG_ERROR("Expected Real Features\n");

		SG_UNREF(feat);
	}

	else
	{
		if (!m_features->has_property(FP_DOT))
			SG_ERROR("Specified features are not of type CDotFeatures\n");

		if (m_features->get_feature_class() != C_DENSE)
			SG_ERROR("Expected Simple Features\n");

		if (m_features->get_feature_type() != F_DREAL)
			SG_ERROR("Expected Real Features\n");
	}

	if (!m_kernel)
		SG_ERROR("No kernel assigned!\n");

	if (!m_mean)
		SG_ERROR("No mean function assigned!\n");
}

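/* Gradient of the negative log marginal likelihood with respect to the
 * kernel, mean and likelihood hyperparameters, following infLaplace.m.
 * Each derivative combines an explicit term, built from the matrix Z and
 * the kernel derivative dK, with an implicit term that accounts for the
 * movement of the posterior mode f-hat; the implicit part is carried by
 * dfhat = g .* d3lp computed below. */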
CMap<TParameter*, SGVector<float64_t> > CLaplacianInferenceMethod::
get_marginal_likelihood_derivatives(CMap<TParameter*,
		CSGObject*>& para_dict)
{
	check_members();

	if (update_parameter_hash())
		update_all();

	MatrixXd Z(m_L.num_rows, m_L.num_cols);

	Map<VectorXd> eigen_dlp(dlp.vector, dlp.vlen);

	for (index_t i = 0; i < m_L.num_rows; i++)
	{
		for (index_t j = 0; j < m_L.num_cols; j++)
			Z(i,j) = m_L(i,j);
	}

	MatrixXd sW_temp(sW.vlen, m_ktrtr.num_cols);
	VectorXd sum(1);
	sum[0] = 0;

	for (index_t i = 0; i < sW.vlen; i++)
	{
		for (index_t j = 0; j < m_ktrtr.num_cols; j++)
			sW_temp(i,j) = sW[i];
	}

	VectorXd g;

	Map<VectorXd> eigen_W(W.vector, W.vlen);

	Map<MatrixXd> eigen_temp_kernel(temp_kernel.matrix,
			temp_kernel.num_rows, temp_kernel.num_cols);

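	/* When some W[i] < 0 the likelihood is not log-concave (e.g. Student's t)
	 * and the sqrt(W)-based Cholesky representation is unusable, so this
	 * branch works with an LU factorization of A = I + K*diag(W) instead,
	 * mirroring the non-log-concave path of infLaplace.m. */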
	if (eigen_W.minCoeff() < 0)
	{
		Z = -Z;

		MatrixXd A = MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols);

		MatrixXd temp_diagonal(sW.vlen, sW.vlen);
		temp_diagonal.setZero(sW.vlen, sW.vlen);

		for (index_t s = 0; s < temp_diagonal.rows(); s++)
			temp_diagonal(s,s) = W[s];

		A = A + eigen_temp_kernel*m_scale*m_scale*temp_diagonal;

		FullPivLU<MatrixXd> lu(A);

		MatrixXd temp_matrix =
				lu.inverse().cwiseProduct(eigen_temp_kernel*m_scale*m_scale);

		VectorXd temp_sum(temp_matrix.rows());
		temp_sum.setZero(temp_matrix.rows());

		for (index_t i = 0; i < temp_matrix.rows(); i++)
		{
			for (index_t j = 0; j < temp_matrix.cols(); j++)
				temp_sum[i] += temp_matrix(j,i);
		}

		g = temp_sum/2.0;
	}

	else
	{
		MatrixXd C = Z.transpose().colPivHouseholderQr().solve(
				sW_temp.cwiseProduct(eigen_temp_kernel*m_scale*m_scale));

		MatrixXd temp_diagonal(sW.vlen, sW.vlen);
		temp_diagonal.setZero(sW.vlen, sW.vlen);

		for (index_t s = 0; s < sW.vlen; s++)
			temp_diagonal(s,s) = sW[s];

		MatrixXd temp = Z.transpose();

		Z = Z.transpose().colPivHouseholderQr().solve(temp_diagonal);

		Z = temp.transpose().colPivHouseholderQr().solve(Z);

		for (index_t s = 0; s < Z.rows(); s++)
		{
			for (index_t r = 0; r < Z.cols(); r++)
				Z(s,r) *= sW[s];
		}

		VectorXd temp_sum(C.rows());

		temp_sum.setZero(C.rows());

		for (index_t i = 0; i < C.rows(); i++)
		{
			for (index_t j = 0; j < C.cols(); j++)
				temp_sum[i] += C(j,i)*C(j,i);
		}

		g = (eigen_temp_kernel.diagonal()*m_scale*m_scale-temp_sum)/2.0;
	}

	Map<VectorXd> eigen_d3lp(d3lp.vector, d3lp.vlen);

	VectorXd dfhat = g.cwiseProduct(eigen_d3lp);

	m_kernel->build_parameter_dictionary(para_dict);
	m_mean->build_parameter_dictionary(para_dict);
	m_model->build_parameter_dictionary(para_dict);

	CMap<TParameter*, SGVector<float64_t> > gradient(
			3+para_dict.get_num_elements(),
			3+para_dict.get_num_elements());

	for (index_t i = 0; i < para_dict.get_num_elements(); i++)
	{
		shogun::CMapNode<TParameter*, CSGObject*>* node =
				para_dict.get_node_ptr(i);

		TParameter* param = node->key;
		CSGObject* obj = node->data;

		index_t length = 1;

		if ((param->m_datatype.m_ctype == CT_VECTOR ||
				param->m_datatype.m_ctype == CT_SGVECTOR) &&
				param->m_datatype.m_length_y != NULL)
			length = *(param->m_datatype.m_length_y);

		SGVector<float64_t> variables(length);

		bool deriv_found = false;

		Map<VectorXd> eigen_temp_alpha(temp_alpha.vector,
				temp_alpha.vlen);

		for (index_t h = 0; h < length; h++)
		{
			SGMatrix<float64_t> deriv;
			SGVector<float64_t> mean_derivatives;
			VectorXd mean_dev_temp;
			SGVector<float64_t> lik_first_deriv;
			SGVector<float64_t> lik_second_deriv;

			if (param->m_datatype.m_ctype == CT_VECTOR ||
					param->m_datatype.m_ctype == CT_SGVECTOR)
			{
				deriv = m_kernel->get_parameter_gradient(param, obj);

				lik_first_deriv = m_model->get_first_derivative(
						(CRegressionLabels*)m_labels, param, obj, function);

				lik_second_deriv = m_model->get_second_derivative(
						(CRegressionLabels*)m_labels, param, obj, function);

				mean_derivatives = m_mean->get_parameter_derivative(
						param, obj, m_feature_matrix, h);

				mean_dev_temp.resize(mean_derivatives.vlen);

				for (index_t d = 0; d < mean_derivatives.vlen; d++)
					mean_dev_temp[d] = mean_derivatives[d];
			}

			else
			{
				mean_derivatives = m_mean->get_parameter_derivative(
						param, obj, m_feature_matrix);

				mean_dev_temp.resize(mean_derivatives.vlen);

				for (index_t d = 0; d < mean_derivatives.vlen; d++)
					mean_dev_temp[d] = mean_derivatives[d];

				deriv = m_kernel->get_parameter_gradient(param, obj);

				lik_first_deriv = m_model->get_first_derivative(
						(CRegressionLabels*)m_labels, param, obj, function);

				lik_second_deriv = m_model->get_second_derivative(
						(CRegressionLabels*)m_labels, param, obj, function);
			}

			if (deriv.num_cols*deriv.num_rows > 0)
			{
				MatrixXd dK(deriv.num_rows, deriv.num_cols);

				for (index_t d = 0; d < deriv.num_rows; d++)
				{
					for (index_t s = 0; s < deriv.num_cols; s++)
						dK(d,s) = deriv(d,s);
				}

				sum[0] = (Z.cwiseProduct(dK)).sum()/2.0;

				sum = sum - eigen_temp_alpha.transpose()*dK*eigen_temp_alpha/2.0;

				VectorXd b = dK*eigen_dlp;

				sum = sum -
						dfhat.transpose()*(b-eigen_temp_kernel*(Z*b)*m_scale*m_scale);

				variables[h] = sum[0];

				deriv_found = true;
			}

			else if (mean_derivatives.vlen > 0)
			{
				sum = -eigen_temp_alpha.transpose()*mean_dev_temp;
				sum = sum - dfhat.transpose()*(mean_dev_temp-eigen_temp_kernel*
						(Z*mean_dev_temp)*m_scale*m_scale);
				variables[h] = sum[0];
				deriv_found = true;
			}

			else if (lik_first_deriv[0]+lik_second_deriv[0] != CMath::INFTY)
			{
				Map<VectorXd> eigen_fd(lik_first_deriv.vector, lik_first_deriv.vlen);

				Map<VectorXd> eigen_sd(lik_second_deriv.vector, lik_second_deriv.vlen);

				sum[0] = -g.dot(eigen_sd);
				sum[0] = sum[0] - eigen_fd.sum();
				variables[h] = sum[0];
				deriv_found = true;
			}
		}

		if (deriv_found)
			gradient.add(param, variables);
	}

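	/* The kernel scale parameter is handled separately from the dictionary
	 * loop above: the kernel enters the model as scale^2 * K, so its
	 * derivative is d(scale^2 * K)/d(scale) = 2*scale*K, the dK assembled
	 * below. */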
	TParameter* param;
	index_t index = get_modsel_param_index("scale");
	param = m_model_selection_parameters->get_parameter(index);

	MatrixXd dK(m_ktrtr.num_rows, m_ktrtr.num_cols);

	for (index_t d = 0; d < m_ktrtr.num_rows; d++)
	{
		for (index_t s = 0; s < m_ktrtr.num_cols; s++)
			dK(d,s) = m_ktrtr(d,s)*m_scale*2.0;
	}

	Map<VectorXd> eigen_temp_alpha(temp_alpha.vector,
			temp_alpha.vlen);

	sum[0] = (Z.cwiseProduct(dK)).sum()/2.0;

	sum = sum - eigen_temp_alpha.transpose()*dK*eigen_temp_alpha/2.0;

	VectorXd b = dK*eigen_dlp;

	sum = sum - dfhat.transpose()*(b-eigen_temp_kernel*(Z*b)*m_scale*m_scale);

	SGVector<float64_t> scale(1);

	scale[0] = sum[0];

	gradient.add(param, scale);
	para_dict.add(param, this);

	return gradient;
}

SGVector<float64_t> CLaplacianInferenceMethod::get_diagonal_vector()
{
	SGVector<float64_t> result(sW.vlen);

	for (index_t i = 0; i < sW.vlen; i++)
		result[i] = sW[i];

	return result;
}

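/* Negative log marginal likelihood of the Laplace approximation. In the
 * notation of infLaplace.m:
 *
 *     -log Z ~= alpha'*(f - m)/2 - log p(y|f) + log(det(B))/2,
 *
 * where B = I + sW*K*sW in the log-concave case (so log(det(B))/2 reduces
 * to the sum of the log-diagonal of the Cholesky factor), and the LU
 * determinant of A = I + K*diag(W) is used otherwise. */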
float64_t CLaplacianInferenceMethod::get_negative_marginal_likelihood()
{
	if (update_parameter_hash())
		update_all();

	Map<VectorXd> eigen_temp_alpha(temp_alpha.vector, temp_alpha.vlen);

	Map<MatrixXd> eigen_temp_kernel(temp_kernel.matrix,
			temp_kernel.num_rows, temp_kernel.num_cols);

	Map<VectorXd> eigen_function(function.vector,
			function.vlen);

	Map<VectorXd> eigen_W(W.vector, W.vlen);

	Map<VectorXd> eigen_m_means(m_means.vector, m_means.vlen);

	if (eigen_W.minCoeff() < 0)
	{
		MatrixXd A = MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols);

		MatrixXd temp_diagonal(sW.vlen, sW.vlen);
		temp_diagonal.setZero(sW.vlen, sW.vlen);

		for (index_t s = 0; s < sW.vlen; s++)
			temp_diagonal(s,s) = W[s];

		A = A + eigen_temp_kernel*m_scale*m_scale*temp_diagonal;

		FullPivLU<MatrixXd> lu(A);

		float64_t result =
				(eigen_temp_alpha.transpose()*(eigen_function-eigen_m_means))[0]/2.0 -
				lp + log(lu.determinant())/2.0;

		return result;
	}

	else
	{
		Map<VectorXd> eigen_sW(sW.vector, sW.vlen);

		LLT<MatrixXd> L(
				(eigen_sW*eigen_sW.transpose()).cwiseProduct(eigen_temp_kernel*m_scale*m_scale) +
				MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));

		MatrixXd chol = L.matrixL();

		float64_t sum = 0;

		for (index_t i = 0; i < m_L.num_rows; i++)
			sum += log(m_L(i,i));

		float64_t result = eigen_temp_alpha.dot(eigen_function-eigen_m_means)/2.0 -
				lp + sum;

		return result;
	}
}

SGVector<float64_t> CLaplacianInferenceMethod::get_alpha()
{
	if (update_parameter_hash())
		update_all();

	SGVector<float64_t> result(m_alpha);
	return result;
}

SGMatrix<float64_t> CLaplacianInferenceMethod::get_cholesky()
{
	if (update_parameter_hash())
		update_all();

	SGMatrix<float64_t> result(m_L);
	return result;
}

void CLaplacianInferenceMethod::update_train_kernel()
{
	m_kernel->cleanup();

	m_kernel->init(m_features, m_features);

	//K(X, X)
	SGMatrix<float64_t> kernel_matrix = m_kernel->get_kernel_matrix();

	m_ktrtr = kernel_matrix.clone();

	temp_kernel = SGMatrix<float64_t>(kernel_matrix.num_rows, kernel_matrix.num_cols);

	for (index_t i = 0; i < kernel_matrix.num_rows; i++)
	{
		for (index_t j = 0; j < kernel_matrix.num_cols; j++)
			temp_kernel(i,j) = kernel_matrix(i,j);
	}
}

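/* Rebuilds the stored factor m_L. For a log-concave likelihood (all
 * W[i] > 0) this is the upper Cholesky factor of B = I + sW*K*sW; in the
 * non-log-concave case the negated inverse of A = K*scale^2 + diag(1/W)
 * is stored instead. */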
void CLaplacianInferenceMethod::update_chol()
{
	check_members();

	Map<VectorXd> eigen_W(W.vector, W.vlen);

	Map<MatrixXd> eigen_temp_kernel(temp_kernel.matrix,
			temp_kernel.num_rows, temp_kernel.num_cols);

	if (eigen_W.minCoeff() < 0)
	{
		MatrixXd temp_diagonal(sW.vlen, sW.vlen);
		temp_diagonal.setZero(sW.vlen, sW.vlen);

		for (index_t s = 0; s < temp_diagonal.rows(); s++)
			temp_diagonal(s,s) = 1.0/W[s];

		MatrixXd A = eigen_temp_kernel*m_scale*m_scale+temp_diagonal;

		FullPivLU<MatrixXd> lu(A);

		MatrixXd chol = -lu.inverse();

		m_L = SGMatrix<float64_t>(chol.rows(), chol.cols());

		for (index_t i = 0; i < chol.rows(); i++)
		{
			for (index_t j = 0; j < chol.cols(); j++)
				m_L(i,j) = chol(i,j);
		}
	}

	else
	{
		Map<VectorXd> eigen_sW(sW.vector, sW.vlen);

		LLT<MatrixXd> L(
				(eigen_sW*eigen_sW.transpose()).cwiseProduct(eigen_temp_kernel*m_scale*m_scale) +
				MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));

		MatrixXd chol = L.matrixU();

		m_L = SGMatrix<float64_t>(chol.rows(), chol.cols());

		for (index_t i = 0; i < chol.rows(); i++)
		{
			for (index_t j = 0; j < chol.cols(); j++)
				m_L(i,j) = chol(i,j);
		}
	}
}

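/* Finds the mode f-hat of the posterior via Newton's method, parameterized
 * through alpha with f = K*alpha*scale^2 + m. Each iteration assembles the
 * Newton direction dalpha from the current likelihood derivatives and then
 * line-searches along it with Brent's method (the Psi_line functor above),
 * following infLaplace.m. */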
void CLaplacianInferenceMethod::update_alpha()
{
	float64_t Psi_Old = CMath::INFTY;
	float64_t Psi_New;
	float64_t Psi_Def;

	SGVector<float64_t> temp_mean = m_mean->get_mean_vector(m_feature_matrix);

	m_means = SGVector<float64_t>(temp_mean.vlen);
	temp_alpha = SGVector<float64_t>(m_alpha.vlen);
	VectorXd first_derivative;

	for (index_t i = 0; i < temp_mean.vlen; i++)
		m_means[i] = temp_mean[i];

	for (index_t i = 0; i < m_alpha.vlen; i++)
		temp_alpha[i] = m_alpha[i];

	for (index_t i = 0; i < m_ktrtr.num_rows; i++)
	{
		for (index_t j = 0; j < m_ktrtr.num_cols; j++)
			temp_kernel(i,j) = m_ktrtr(i,j);
	}

	Map<MatrixXd> eigen_temp_kernel(temp_kernel.matrix, temp_kernel.num_rows, temp_kernel.num_cols);

	VectorXd eigen_function;

	Map<VectorXd> eigen_m_means(m_means.vector, m_means.vlen);

	if (m_alpha.vlen != m_labels->get_num_labels())
	{
		temp_alpha = SGVector<float64_t>(m_labels->get_num_labels());

		for (index_t i = 0; i < temp_alpha.vlen; i++)
			temp_alpha[i] = 0.0;

		Map<VectorXd> eigen_temp_alpha2(temp_alpha.vector, temp_alpha.vlen);

		eigen_function = eigen_temp_kernel*eigen_temp_alpha2*m_scale*m_scale+eigen_m_means;

		function = SGVector<float64_t>(eigen_function.rows());

		for (index_t i = 0; i < eigen_function.rows(); i++)
			function[i] = eigen_function[i];

		W = m_model->get_log_probability_derivative_f(
				(CRegressionLabels*)m_labels, function, 2);

		for (index_t i = 0; i < W.vlen; i++)
			W[i] = -W[i];

		Psi_New = -m_model->get_log_probability_f(
				(CRegressionLabels*)m_labels, function);
	}

	else
	{
		Map<VectorXd> eigen_temp_alpha2(temp_alpha.vector, temp_alpha.vlen);

		eigen_function = eigen_temp_kernel*m_scale*m_scale*eigen_temp_alpha2+eigen_m_means;

		function = SGVector<float64_t>(eigen_function.rows());

		for (index_t i = 0; i < eigen_function.rows(); i++)
			function[i] = eigen_function[i];

		W = m_model->get_log_probability_derivative_f(
				(CRegressionLabels*)m_labels, function, 2);

		for (index_t i = 0; i < W.vlen; i++)
			W[i] = -W[i];

		Psi_New = (eigen_temp_alpha2.transpose()*(eigen_function-eigen_m_means))[0]/2.0;

		Psi_New -= m_model->get_log_probability_f(
				(CRegressionLabels*)m_labels, function);

		Psi_Def = -m_model->get_log_probability_f(
				(CRegressionLabels*)m_labels, m_means);

		if (Psi_Def < Psi_New)
		{
			eigen_temp_alpha2.setZero();

			for (index_t i = 0; i < m_alpha.vlen; i++)
				temp_alpha[i] = eigen_temp_alpha2[i];

			W = m_model->get_log_probability_derivative_f(
					(CRegressionLabels*)m_labels, function, 2);

			for (index_t i = 0; i < W.vlen; i++)
				W[i] = -W[i];

			Psi_New = -m_model->get_log_probability_f(
					(CRegressionLabels*)m_labels, function);
		}
	}

	Map<VectorXd> eigen_temp_alpha(temp_alpha.vector, temp_alpha.vlen);

	index_t itr = 0;

	dlp = m_model->get_log_probability_derivative_f(
			(CRegressionLabels*)m_labels, function, 1);

	d2lp = m_model->get_log_probability_derivative_f(
			(CRegressionLabels*)m_labels, function, 2);

	d3lp = m_model->get_log_probability_derivative_f(
			(CRegressionLabels*)m_labels, function, 3);

	Map<VectorXd> eigen_d2lp(d2lp.vector, d2lp.vlen);

	sW = W.clone();

	while (Psi_Old - Psi_New > m_tolerance && itr < m_max_itr)
	{
		Map<VectorXd> eigen_W(W.vector, W.vlen);

		Psi_Old = Psi_New;
		itr++;

		if (eigen_W.minCoeff() < 0)
		{
			//Suggested by Vanhatalo et. al.,
			//Gaussian Process Regression with Student's t likelihood, NIPS 2009
			//Quoted from infLaplace.m
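			//The correction below adds 2/df * dlp.^2 to W, which keeps the
			//B = I + sW*K*sW factorization positive definite when the
			//Student's t likelihood drives some W[i] negative.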
			//df: degrees of freedom of the Student's t likelihood (the exact
			//retrieval below is a reconstruction of an elided line)
			float64_t df = ((CStudentsTLikelihood*)m_model)->get_degrees_freedom();

			for (index_t i = 0; i < eigen_W.rows(); i++)
				eigen_W[i] += 2.0/df*dlp[i]*dlp[i];
		}

		for (index_t i = 0; i < eigen_W.rows(); i++)
			W[i] = eigen_W[i];

		for (index_t i = 0; i < W.vlen; i++)
			sW[i] = CMath::sqrt(float64_t(W[i]));

		Map<VectorXd> eigen_sW(sW.vector, sW.vlen);

		LLT<MatrixXd> L((eigen_sW*eigen_sW.transpose()).cwiseProduct(
				eigen_temp_kernel*m_scale*m_scale) +
				MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));

		MatrixXd chol = L.matrixL();

		MatrixXd temp = L.matrixL();

		Map<VectorXd> temp_eigen_dlp(dlp.vector, dlp.vlen);

		VectorXd b = eigen_W.cwiseProduct(eigen_function - eigen_m_means) + temp_eigen_dlp;

		chol = chol.colPivHouseholderQr().solve(eigen_sW.cwiseProduct(
				eigen_temp_kernel*b*m_scale*m_scale));

		chol = temp.transpose().colPivHouseholderQr().solve(chol);

		VectorXd dalpha = b - eigen_sW.cwiseProduct(chol) - eigen_temp_alpha;
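		//Brent line search: minimize Psi(start_alpha + x*dalpha) over the
		//step size x in [0, m_max] using the Psi_line functor.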

		Psi_line func;

		func.lab = (CRegressionLabels*)m_labels;
		func.K = &eigen_temp_kernel;
		func.scale = m_scale;
		func.alpha = &eigen_temp_alpha;
		func.dalpha = &dalpha;
		func.l1 = &lp;
		func.dl1 = &dlp;
		func.dl2 = &eigen_d2lp;
		func.f = &function;
		func.lik = m_model;
		func.m = &m_means;
		func.mW = &W;
		func.start_alpha = eigen_temp_alpha;

		local_min(0, m_max, m_opt_tolerance, func, Psi_New);
	}

	for (index_t i = 0; i < m_alpha.vlen; i++)
		temp_alpha[i] = eigen_temp_alpha[i];

	Map<VectorXd> eigen_dlp(dlp.vector, dlp.vlen);

	eigen_function = eigen_temp_kernel*m_scale*m_scale*eigen_temp_alpha+eigen_m_means;

	function = SGVector<float64_t>(eigen_function.rows());

	for (index_t i = 0; i < eigen_function.rows(); i++)
		function[i] = eigen_function[i];

	lp = m_model->get_log_probability_f((CRegressionLabels*)m_labels, function);

	dlp = m_model->get_log_probability_derivative_f(
			(CRegressionLabels*)m_labels, function, 1);

	d2lp = m_model->get_log_probability_derivative_f(
			(CRegressionLabels*)m_labels, function, 2);

	d3lp = m_model->get_log_probability_derivative_f(
			(CRegressionLabels*)m_labels, function, 3);

	W = m_model->get_log_probability_derivative_f(
			(CRegressionLabels*)m_labels, function, 2);

	for (index_t i = 0; i < W.vlen; i++)
		W[i] = -W[i];

	for (index_t i = 0; i < sW.vlen; i++)
		sW[i] = 0.0;

	Map<VectorXd> eigen_W2(W.vector, W.vlen);

	if (eigen_W2.minCoeff() > 0)
	{
		for (index_t i = 0; i < sW.vlen; i++)
			sW[i] = CMath::sqrt(float64_t(W[i]));
	}

	m_alpha = SGVector<float64_t>(eigen_temp_alpha.rows());

	for (index_t i = 0; i < m_alpha.vlen; i++)
		m_alpha[i] = eigen_temp_alpha[i];
}

#endif // HAVE_EIGEN3
#endif // HAVE_LAPACK
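
/* A minimal usage sketch (not part of this file), assuming the Shogun 2.0.0
 * GP regression API; CDenseFeatures, CGaussianKernel, CZeroMean and
 * CStudentsTLikelihood are the standard Shogun classes, but the exact setup
 * below is illustrative rather than verbatim from the examples:
 *
 *     CDenseFeatures<float64_t>* feat = new CDenseFeatures<float64_t>(X);
 *     CRegressionLabels* lab = new CRegressionLabels(y);
 *     CGaussianKernel* kernel = new CGaussianKernel(10, 2.0);
 *     CZeroMean* mean = new CZeroMean();
 *     CStudentsTLikelihood* lik = new CStudentsTLikelihood();
 *
 *     CLaplacianInferenceMethod* inf = new CLaplacianInferenceMethod(
 *         kernel, feat, mean, lab, lik);
 *
 *     float64_t nlZ = inf->get_negative_marginal_likelihood();
 *     SGVector<float64_t> alpha = inf->get_alpha();
 *     SG_UNREF(inf);
 */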