SHOGUN 4.1.0
SingleLaplacianInferenceMethod.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2013 Roman Votyakov
 * Copyright (C) 2012 Jacob Walker
 * Copyright (C) 2013 Roman Votyakov
 *
 * Code adapted from Gaussian Process Machine Learning Toolbox
 * http://www.gaussianprocess.org/gpml/code/matlab/doc/
 * This code specifically adapted from infLaplace.m
 */

#include <shogun/machine/gp/SingleLaplacianInferenceMethod.h>

#ifdef HAVE_EIGEN3

#include <shogun/machine/gp/StudentsTLikelihood.h>
#include <shogun/mathematics/Math.h>
#include <shogun/lib/external/brent.h>
#include <shogun/mathematics/eigen3.h>

using namespace shogun;
using namespace Eigen;

namespace shogun
{

#ifndef DOXYGEN_SHOULD_SKIP_THIS

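// Line-search functor for the Brent minimizer: given a step size x, it
// sets alpha=start_alpha+x*dalpha, recomputes f=K*alpha*exp(2*log_scale)+m
// and the likelihood derivatives, and returns the objective
// psi(alpha) = alpha'*(f-m)/2 - log p(y|f)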
class CPsiLine : public func_base
{
public:
	float64_t log_scale;
	MatrixXd K;
	VectorXd dalpha;
	VectorXd start_alpha;
	Map<VectorXd>* alpha;
	SGVector<float64_t>* dlp;
	SGVector<float64_t>* W;
	SGVector<float64_t>* f;
	SGVector<float64_t>* m;
	CLikelihoodModel* lik;
	CLabels* lab;

	virtual double operator() (double x)
	{
		Map<VectorXd> eigen_f(f->vector, f->vlen);
		Map<VectorXd> eigen_m(m->vector, m->vlen);

		// compute alpha=alpha+x*dalpha and f=K*alpha+m
		(*alpha)=start_alpha+x*dalpha;
		eigen_f=K*(*alpha)*CMath::exp(log_scale*2.0)+eigen_m;

		// get first and second derivatives of log likelihood
		(*dlp)=lik->get_log_probability_derivative_f(lab, (*f), 1);

		(*W)=lik->get_log_probability_derivative_f(lab, (*f), 2);
		W->scale(-1.0);

		// compute psi=alpha'*(f-m)/2-lp
		float64_t result=(*alpha).dot(eigen_f-eigen_m)/2.0-
			SGVector<float64_t>::sum(lik->get_log_probability_f(lab, *f));

		return result;
	}
};

#endif /* DOXYGEN_SHOULD_SKIP_THIS */

CSingleLaplacianInferenceMethod::CSingleLaplacianInferenceMethod() : CLaplacianInferenceBase()
{
	init();
}

CSingleLaplacianInferenceMethod::CSingleLaplacianInferenceMethod(CKernel* kern,
	CFeatures* feat, CMeanFunction* m, CLabels* lab, CLikelihoodModel* mod)
	: CLaplacianInferenceBase(kern, feat, m, lab, mod)
{
	init();
}

void CSingleLaplacianInferenceMethod::init()
{
	m_Psi=0;
	SG_ADD(&m_Psi, "Psi", "posterior log likelihood without constant terms", MS_NOT_AVAILABLE);
	SG_ADD(&m_sW, "sW", "square root of W", MS_NOT_AVAILABLE);
	SG_ADD(&m_d2lp, "d2lp", "second derivative of log likelihood with respect to function location", MS_NOT_AVAILABLE);
	SG_ADD(&m_d3lp, "d3lp", "third derivative of log likelihood with respect to function location", MS_NOT_AVAILABLE);
}

SGVector<float64_t> CSingleLaplacianInferenceMethod::get_diagonal_vector()
{
	if (parameter_hash_changed())
		update();

	return SGVector<float64_t>(m_sW);
}

CSingleLaplacianInferenceMethod* CSingleLaplacianInferenceMethod::obtain_from_generic(
	CInferenceMethod* inference)
{
	if (inference==NULL)
		return NULL;

	if (inference->get_inference_type()!=INF_LAPLACIAN_SINGLE)
		SG_SERROR("Provided inference is not of type CSingleLaplacianInferenceMethod\n")

	SG_REF(inference);
	return (CSingleLaplacianInferenceMethod*)inference;
}

CSingleLaplacianInferenceMethod::~CSingleLaplacianInferenceMethod()
{
}

float64_t CSingleLaplacianInferenceMethod::get_negative_log_marginal_likelihood()
{
	if (parameter_hash_changed())
		update();

	// create eigen representations alpha, f, W, L
	Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);
	Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
	Map<VectorXd> eigen_W(m_W.vector, m_W.vlen);
	Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);

	// get mean vector and create eigen representation of it
	SGVector<float64_t> mean=m_mean->get_mean_vector(m_features);
	Map<VectorXd> eigen_mean(mean.vector, mean.vlen);

	// get log likelihood
	float64_t lp=SGVector<float64_t>::sum(m_model->get_log_probability_f(m_labels,
		m_mu));

	float64_t result;

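	// nlZ = alpha'*(f-m)/2 - sum(log p(y|f)) + log(det(B))/2; the
	// determinant term comes from an LU factorization when W has negative
	// entries (Cholesky is not applicable there) and from the triangular
	// factor L otherwise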
	if (eigen_W.minCoeff()<0)
	{
		Map<VectorXd> eigen_sW(m_sW.vector, m_sW.vlen);
		Map<MatrixXd> eigen_ktrtr(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);

		FullPivLU<MatrixXd> lu(MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols)+
			eigen_ktrtr*CMath::exp(m_log_scale*2.0)*eigen_sW.asDiagonal());

		result=(eigen_alpha.dot(eigen_mu-eigen_mean))/2.0-
			lp+log(lu.determinant())/2.0;
	}
	else
	{
		result=eigen_alpha.dot(eigen_mu-eigen_mean)/2.0-lp+
			eigen_L.diagonal().array().log().sum();
	}

	return result;
}

void CSingleLaplacianInferenceMethod::update_approx_cov()
{
	// create eigen representations of L, K and sW
	Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
	Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
	Map<VectorXd> eigen_sW(m_sW.vector, m_sW.vlen);

	// create shogun and eigen representation of posterior covariance
	m_Sigma=SGMatrix<float64_t>(m_ktrtr.num_rows, m_ktrtr.num_cols);
	Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows, m_Sigma.num_cols);

	// compute V = L^(-1) * W^(1/2) * K, using upper triangular factor L^T
	MatrixXd eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
		eigen_sW.asDiagonal()*eigen_K*CMath::exp(m_log_scale*2.0));

	// compute covariance matrix of the posterior:
	// Sigma = K - K * W^(1/2) * (L * L^T)^(-1) * W^(1/2) * K =
	// K - (K * W^(1/2)) * (L^T)^(-1) * L^(-1) * W^(1/2) * K =
	// K - (W^(1/2) * K)^T * (L^(-1))^T * L^(-1) * W^(1/2) * K = K - V^T * V
	eigen_Sigma=eigen_K*CMath::exp(m_log_scale*2.0)-eigen_V.adjoint()*eigen_V;
}

void CSingleLaplacianInferenceMethod::update_chol()
{
	// get log probability derivatives
	m_dlp=m_model->get_log_probability_derivative_f(m_labels, m_mu, 1);
	m_d2lp=m_model->get_log_probability_derivative_f(m_labels, m_mu, 2);
	m_d3lp=m_model->get_log_probability_derivative_f(m_labels, m_mu, 3);

	// W = -d2lp
	m_W=m_d2lp.clone();
	m_W.scale(-1.0);
	m_sW=SGVector<float64_t>(m_W.vlen);

	// compute sW
	Map<VectorXd> eigen_W(m_W.vector, m_W.vlen);
	Map<VectorXd> eigen_sW(m_sW.vector, m_sW.vlen);

	if (eigen_W.minCoeff()>0)
		eigen_sW=eigen_W.cwiseSqrt();
	else
		// post.sW = sqrt(abs(W)).*sign(W), as in infLaplace.m
		eigen_sW=((eigen_W.array().abs()+eigen_W.array())/2).sqrt()-
			((eigen_W.array().abs()-eigen_W.array())/2).sqrt();

	// create eigen representation of kernel matrix
	Map<MatrixXd> eigen_ktrtr(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);

	// create shogun and eigen representation of posterior cholesky
	m_L=SGMatrix<float64_t>(m_ktrtr.num_rows, m_ktrtr.num_cols);
	Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);

	if (eigen_W.minCoeff() < 0)
	{
		// A = eye(n)+K.*repmat(w',n,1)
		FullPivLU<MatrixXd> lu(
			MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols)+
			eigen_ktrtr*CMath::exp(m_log_scale*2.0)*eigen_W.asDiagonal());

		// compute cholesky: L = -(K + 1/W)^-1
		// -iA = -inv(A)
		eigen_L=-lu.inverse();
		// -repmat(w,1,n).*iA == (-iA'.*repmat(w',n,1))'
		eigen_L=eigen_W.asDiagonal()*eigen_L;
	}
	else
	{
		// compute cholesky: L = chol(sW * sW' .* K + I)
		LLT<MatrixXd> L(
			(eigen_sW*eigen_sW.transpose()).cwiseProduct(eigen_ktrtr*CMath::exp(m_log_scale*2.0))+
			MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));

		eigen_L=L.matrixU();
	}
}

void CSingleLaplacianInferenceMethod::update()
{
	SG_DEBUG("entering\n");

	CInferenceMethod::update();
	update_init();
	update_alpha();
	update_chol();
	m_gradient_update=false;
	update_parameter_hash();

	SG_DEBUG("leaving\n");
}

void CSingleLaplacianInferenceMethod::update_init()
{
	float64_t Psi_New;
	float64_t Psi_Def;
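
	// warm start: reuse the previous alpha if its length matches the
	// labels, otherwise restart from alpha=0 (i.e. f=mean); whichever
	// starting point gives the smaller objective Psi is kept below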
	// get mean vector and create eigen representation of it
	SGVector<float64_t> mean=m_mean->get_mean_vector(m_features);
	Map<VectorXd> eigen_mean(mean.vector, mean.vlen);

	// create eigen representation of kernel matrix
	Map<MatrixXd> eigen_ktrtr(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);

	// create shogun and eigen representation of function vector
	m_mu=SGVector<float64_t>(mean.vlen);
	Map<VectorXd> eigen_mu(m_mu, m_mu.vlen);

	if (m_alpha.vlen!=m_labels->get_num_labels())
	{
		// set alpha to a zero vector
		m_alpha=SGVector<float64_t>(m_labels->get_num_labels());
		m_alpha.zero();

		// f = mean, if lengths of alpha and y do not match
		eigen_mu=eigen_mean;

		Psi_New=-SGVector<float64_t>::sum(m_model->get_log_probability_f(
			m_labels, m_mu));
	}
	else
	{
		Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

		// compute f = K * alpha + m
		eigen_mu=eigen_ktrtr*CMath::exp(m_log_scale*2.0)*eigen_alpha+eigen_mean;

		Psi_New=eigen_alpha.dot(eigen_mu-eigen_mean)/2.0-
			SGVector<float64_t>::sum(m_model->get_log_probability_f(m_labels, m_mu));

		Psi_Def=-SGVector<float64_t>::sum(m_model->get_log_probability_f(m_labels, mean));

		// if default is better, then use it
		if (Psi_Def < Psi_New)
		{
			m_alpha.zero();
			eigen_mu=eigen_mean;
			Psi_New=Psi_Def;
		}
	}
	m_Psi=Psi_New;
}

void CSingleLaplacianInferenceMethod::update_alpha()
{
	float64_t Psi_Old=CMath::INFTY;
	float64_t Psi_New=m_Psi;

	// get mean vector and create eigen representation of it
	SGVector<float64_t> mean=m_mean->get_mean_vector(m_features);
	Map<VectorXd> eigen_mean(mean.vector, mean.vlen);

	// create eigen representation of kernel matrix
	Map<MatrixXd> eigen_ktrtr(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);

	Map<VectorXd> eigen_mu(m_mu, m_mu.vlen);

	// compute W = -d2lp
	m_W=m_model->get_log_probability_derivative_f(m_labels, m_mu, 2);
	m_W.scale(-1.0);

	Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

	// get first derivative of log probability function
	m_dlp=m_model->get_log_probability_derivative_f(m_labels, m_mu, 1);

	// create shogun and eigen representation of sW
	m_sW=SGVector<float64_t>(m_W.vlen);
	Map<VectorXd> eigen_sW(m_sW.vector, m_sW.vlen);

	index_t iter=0;

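	// Newton iterations on the unnormalized negative log posterior
	// Psi(alpha) = alpha'*(f-m)/2 - sum(log p(y|f)); stop when the
	// decrease falls below m_tolerance or after m_iter iterations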
	while (Psi_Old-Psi_New>m_tolerance && iter<m_iter)
	{
		Map<VectorXd> eigen_W(m_W.vector, m_W.vlen);
		Map<VectorXd> eigen_dlp(m_dlp.vector, m_dlp.vlen);

		Psi_Old=Psi_New;
		iter++;

		if (eigen_W.minCoeff() < 0)
		{
			// Suggested by Vanhatalo et al.,
			// Gaussian Process Regression with Student's t likelihood, NIPS 2009
			// Quoted from infLaplace.m
			float64_t df;

			if (m_model->get_model_type()==LT_STUDENTST)
			{
				CStudentsTLikelihood* lik=CStudentsTLikelihood::obtain_from_generic(m_model);
				df=lik->get_degrees_freedom();
				SG_UNREF(lik);
			}
			else
				df=1;

			eigen_W+=(2.0/df)*eigen_dlp.cwiseProduct(eigen_dlp);
		}

		// compute sW = sqrt(W)
		eigen_sW=eigen_W.cwiseSqrt();

		LLT<MatrixXd> L((eigen_sW*eigen_sW.transpose()).cwiseProduct(eigen_ktrtr*CMath::exp(m_log_scale*2.0))+
			MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));

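		// Newton step (infLaplace.m): with B = I + sW*K*sW factored as
		// L'*L, the step is dalpha = b - sW.*(B\(sW.*(K*b))) - alpha,
		// where b = W*(f-m) + dlp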
		VectorXd b=eigen_W.cwiseProduct(eigen_mu-eigen_mean)+eigen_dlp;

		VectorXd dalpha=b-eigen_sW.cwiseProduct(
			L.solve(eigen_sW.cwiseProduct(eigen_ktrtr*b*CMath::exp(m_log_scale*2.0))))-eigen_alpha;

		// perform Brent's optimization
		CPsiLine func;

		func.log_scale=m_log_scale;
		func.K=eigen_ktrtr;
		func.dalpha=dalpha;
		func.start_alpha=eigen_alpha;
		func.alpha=&eigen_alpha;
		func.dlp=&m_dlp;
		func.f=&m_mu;
		func.m=&mean;
		func.W=&m_W;
		func.lik=m_model;
		func.lab=m_labels;

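		// line search: minimize psi(alpha+x*dalpha) over the step size
		// x in [0, m_opt_max] with Brent's method; local_min also updates
		// alpha, f, dlp and W through the functor's side effects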
		float64_t x;
		Psi_New=local_min(0, m_opt_max, m_opt_tolerance, func, x);
	}

	if (Psi_Old-Psi_New>m_tolerance && iter>=m_iter)
	{
		SG_WARNING("Max iterations (%d) reached, but convergence level (%f) is not yet below tolerance (%f)\n", m_iter, Psi_Old-Psi_New, m_tolerance);
	}

	// compute f = K * alpha + m
	eigen_mu=eigen_ktrtr*CMath::exp(m_log_scale*2.0)*eigen_alpha+eigen_mean;
}

void CSingleLaplacianInferenceMethod::update_deriv()
{
	// create eigen representation of W, sW, dlp, d3lp, K, alpha and L
	Map<VectorXd> eigen_W(m_W.vector, m_W.vlen);
	Map<VectorXd> eigen_sW(m_sW.vector, m_sW.vlen);
	Map<VectorXd> eigen_dlp(m_dlp.vector, m_dlp.vlen);
	Map<VectorXd> eigen_d3lp(m_d3lp.vector, m_d3lp.vlen);
	Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
	Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);
	Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);

	// create shogun and eigen representation of matrix Z
	m_Z=SGMatrix<float64_t>(m_L.num_rows, m_L.num_cols);
	Map<MatrixXd> eigen_Z(m_Z.matrix, m_Z.num_rows, m_Z.num_cols);

	// create shogun and eigen representation of the vector g
	m_g=SGVector<float64_t>(m_Z.num_rows);
	Map<VectorXd> eigen_g(m_g.vector, m_g.vlen);

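	// Z = (K + W^(-1))^(-1) and g = diag((K^(-1) + diag(W))^(-1))/2 are
	// the reusable pieces of the implicit derivative terms; dfhat below
	// combines g with the third log-likelihood derivative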
	if (eigen_W.minCoeff()<0)
	{
		eigen_Z=-eigen_L;

		// compute iA = (I + K * diag(W))^-1
		FullPivLU<MatrixXd> lu(MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols)+
			eigen_K*CMath::exp(m_log_scale*2.0)*eigen_W.asDiagonal());
		MatrixXd iA=lu.inverse();

		// compute derivative ln|L'*L| wrt W: g=sum(iA.*K,2)/2
		eigen_g=(iA.cwiseProduct(eigen_K*CMath::exp(m_log_scale*2.0))).rowwise().sum()/2.0;
	}
	else
	{
		// solve L'*L*Z=diag(sW) and compute Z=diag(sW)*Z
		eigen_Z=eigen_L.triangularView<Upper>().adjoint().solve(
			MatrixXd(eigen_sW.asDiagonal()));
		eigen_Z=eigen_L.triangularView<Upper>().solve(eigen_Z);
		eigen_Z=eigen_sW.asDiagonal()*eigen_Z;

		// solve L'*C=diag(sW)*K
		MatrixXd C=eigen_L.triangularView<Upper>().adjoint().solve(
			eigen_sW.asDiagonal()*eigen_K*CMath::exp(m_log_scale*2.0));

		// compute derivative ln|L'*L| wrt W: g=(diag(K)-sum(C.^2,1)')/2
		eigen_g=(eigen_K.diagonal()*CMath::exp(m_log_scale*2.0)-
			(C.cwiseProduct(C)).colwise().sum().adjoint())/2.0;
	}

	// create shogun and eigen representation of the vector dfhat
	m_dfhat=SGVector<float64_t>(m_g.vlen);
	Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);

	// compute derivative of nlZ wrt fhat
	eigen_dfhat=eigen_g.cwiseProduct(eigen_d3lp);
}

SGVector<float64_t> CSingleLaplacianInferenceMethod::get_derivative_wrt_inference_method(
	const TParameter* param)
{
	REQUIRE(!strcmp(param->m_name, "log_scale"), "Can't compute derivative of "
		"the negative log marginal likelihood wrt %s.%s parameter\n",
		get_name(), param->m_name)

	// create eigen representation of K, Z, dfhat, dlp and alpha
	Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
	Map<MatrixXd> eigen_Z(m_Z.matrix, m_Z.num_rows, m_Z.num_cols);
	Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);
	Map<VectorXd> eigen_dlp(m_dlp.vector, m_dlp.vlen);
	Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

	SGVector<float64_t> result(1);

	// compute derivative K wrt scale
	// compute dnlZ=sum(sum(Z.*dK))/2-alpha'*dK*alpha/2
	result[0]=(eigen_Z.cwiseProduct(eigen_K)).sum()/2.0-
		(eigen_alpha.adjoint()*eigen_K).dot(eigen_alpha)/2.0;

	// compute b=dK*dlp
	VectorXd b=eigen_K*eigen_dlp;

	// compute dnlZ=dnlZ-dfhat'*(b-K*(Z*b))
	result[0]=result[0]-eigen_dfhat.dot(b-eigen_K*CMath::exp(m_log_scale*2.0)*(eigen_Z*b));
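	// chain rule for the log-domain parameter: dK/dlog_scale =
	// 2*exp(2*log_scale)*K, hence the overall factor applied here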
	result[0]*=CMath::exp(m_log_scale*2.0)*2.0;

	return result;
}

SGVector<float64_t> CSingleLaplacianInferenceMethod::get_derivative_wrt_likelihood_model(
	const TParameter* param)
{
	// create eigen representation of K, Z, g and dfhat
	Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
	Map<MatrixXd> eigen_Z(m_Z.matrix, m_Z.num_rows, m_Z.num_cols);
	Map<VectorXd> eigen_g(m_g.vector, m_g.vlen);
	Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);

	// get derivatives wrt likelihood model parameters
	SGVector<float64_t> lp_dhyp=m_model->get_first_derivative(m_labels,
		m_mu, param);
	SGVector<float64_t> dlp_dhyp=m_model->get_second_derivative(m_labels,
		m_mu, param);
	SGVector<float64_t> d2lp_dhyp=m_model->get_third_derivative(m_labels,
		m_mu, param);

	// create eigen representation of the derivatives
	Map<VectorXd> eigen_lp_dhyp(lp_dhyp.vector, lp_dhyp.vlen);
	Map<VectorXd> eigen_dlp_dhyp(dlp_dhyp.vector, dlp_dhyp.vlen);
	Map<VectorXd> eigen_d2lp_dhyp(d2lp_dhyp.vector, d2lp_dhyp.vlen);

	SGVector<float64_t> result(1);

	// compute b vector
	VectorXd b=eigen_K*eigen_dlp_dhyp;

	// compute dnlZ=-g'*d2lp_dhyp-sum(lp_dhyp)-dfhat'*(b-K*(Z*b))
	result[0]=-eigen_g.dot(eigen_d2lp_dhyp)-eigen_lp_dhyp.sum()-
		eigen_dfhat.dot(b-eigen_K*CMath::exp(m_log_scale*2.0)*(eigen_Z*b));

	return result;
}

SGVector<float64_t> CSingleLaplacianInferenceMethod::get_derivative_wrt_kernel(
	const TParameter* param)
{
	// create eigen representation of K, Z, dfhat, dlp and alpha
	Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
	Map<MatrixXd> eigen_Z(m_Z.matrix, m_Z.num_rows, m_Z.num_cols);
	Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);
	Map<VectorXd> eigen_dlp(m_dlp.vector, m_dlp.vlen);
	Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

	REQUIRE(param, "Param not set\n");
	SGVector<float64_t> result;
	int64_t len=const_cast<TParameter*>(param)->m_datatype.get_num_elements();
	result=SGVector<float64_t>(len);

	for (index_t i=0; i<result.vlen; i++)
	{
		SGMatrix<float64_t> dK;

		if (result.vlen==1)
			dK=m_kernel->get_parameter_gradient(param);
		else
			dK=m_kernel->get_parameter_gradient(param, i);

		Map<MatrixXd> eigen_dK(dK.matrix, dK.num_rows, dK.num_cols);

		// compute dnlZ=sum(sum(Z.*dK))/2-alpha'*dK*alpha/2
		result[i]=(eigen_Z.cwiseProduct(eigen_dK)).sum()/2.0-
			(eigen_alpha.adjoint()*eigen_dK).dot(eigen_alpha)/2.0;

		// compute b=dK*dlp
		VectorXd b=eigen_dK*eigen_dlp;

		// compute dnlZ=dnlZ-dfhat'*(b-K*(Z*b))
		result[i]=result[i]-eigen_dfhat.dot(b-eigen_K*CMath::exp(m_log_scale*2.0)*
			(eigen_Z*b));
		result[i]*=CMath::exp(m_log_scale*2.0);
	}

	return result;
}

SGVector<float64_t> CSingleLaplacianInferenceMethod::get_derivative_wrt_mean(
	const TParameter* param)
{
	// create eigen representation of K, Z, dfhat and alpha
	Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
	Map<MatrixXd> eigen_Z(m_Z.matrix, m_Z.num_rows, m_Z.num_cols);
	Map<VectorXd> eigen_dfhat(m_dfhat.vector, m_dfhat.vlen);
	Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

	REQUIRE(param, "Param not set\n");
	SGVector<float64_t> result;
	int64_t len=const_cast<TParameter*>(param)->m_datatype.get_num_elements();
	result=SGVector<float64_t>(len);

	for (index_t i=0; i<result.vlen; i++)
	{
		SGVector<float64_t> dmu;

		if (result.vlen==1)
			dmu=m_mean->get_parameter_derivative(m_features, param);
		else
			dmu=m_mean->get_parameter_derivative(m_features, param, i);

		Map<VectorXd> eigen_dmu(dmu.vector, dmu.vlen);

		// compute dnlZ=-alpha'*dm-dfhat'*(dm-K*(Z*dm))
		result[i]=-eigen_alpha.dot(eigen_dmu)-eigen_dfhat.dot(eigen_dmu-
			eigen_K*CMath::exp(m_log_scale*2.0)*(eigen_Z*eigen_dmu));
	}

	return result;
}

SGVector<float64_t> CSingleLaplacianInferenceMethod::get_posterior_mean()
{
	compute_gradient();

	SGVector<float64_t> res(m_mu.vlen);
	Map<VectorXd> eigen_res(res.vector, res.vlen);

	Map<VectorXd> eigen_mu(m_mu, m_mu.vlen);
	SGVector<float64_t> mean=m_mean->get_mean_vector(m_features);
	Map<VectorXd> eigen_mean(mean.vector, mean.vlen);
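	// since f = K*alpha*exp(2*log_scale)+m at the mode, the returned
	// vector equals K*alpha*exp(2*log_scale)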
	eigen_res=eigen_mu-eigen_mean;

	return res;
}

}

#endif /* HAVE_EIGEN3 */