ExactInferenceMethod.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2013 Roman Votyakov
 * Copyright (C) 2012 Jacob Walker
 * Copyright (C) 2013 Roman Votyakov
 *
 * Code adapted from Gaussian Process Machine Learning Toolbox
 * http://www.gaussianprocess.org/gpml/code/matlab/doc/
 */

#include <shogun/machine/gp/ExactInferenceMethod.h>

#ifdef HAVE_EIGEN3

#include <shogun/machine/gp/GaussianLikelihood.h>
#include <shogun/labels/RegressionLabels.h>
#include <shogun/mathematics/eigen3.h>
#include <shogun/mathematics/Math.h>

using namespace shogun;
using namespace Eigen;

CExactInferenceMethod::CExactInferenceMethod() : CInferenceMethod()
{
}

CExactInferenceMethod::CExactInferenceMethod(CKernel* kern, CFeatures* feat,
    CMeanFunction* m, CLabels* lab, CLikelihoodModel* mod) :
    CInferenceMethod(kern, feat, m, lab, mod)
{
}

CExactInferenceMethod::~CExactInferenceMethod()
{
}

void CExactInferenceMethod::update()
{
    SG_DEBUG("entering\n");

    CInferenceMethod::update();
    update_chol();
    update_alpha();
    update_deriv();
    update_mean();
    update_cov();
    update_parameter_hash();

    SG_DEBUG("leaving\n");
}

void CExactInferenceMethod::check_members() const
{
    CInferenceMethod::check_members();

    REQUIRE(m_model->get_model_type()==LT_GAUSSIAN,
        "Exact inference method can only use Gaussian likelihood function\n")
    REQUIRE(m_labels->get_label_type()==LT_REGRESSION,
        "Labels must be type of CRegressionLabels\n")
}

SGVector<float64_t> CExactInferenceMethod::get_diagonal_vector()
{
    if (parameter_hash_changed())
        update();

    // get the sigma variable from the Gaussian likelihood model
    CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
    float64_t sigma=lik->get_sigma();
    SG_UNREF(lik);

    // compute diagonal vector: sW=1/sigma
    SGVector<float64_t> result(m_features->get_num_vectors());
    result.fill_vector(result.vector, m_features->get_num_vectors(), 1.0/sigma);

    return result;
}

float64_t CExactInferenceMethod::get_negative_log_marginal_likelihood()
{
    if (parameter_hash_changed())
        update();

    // get the sigma variable from the Gaussian likelihood model
    CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
    float64_t sigma=lik->get_sigma();
    SG_UNREF(lik);

    // create eigen representation of alpha and L
    Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);
    Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);

    // get labels and mean vector and create eigen representations
    SGVector<float64_t> y=((CRegressionLabels*) m_labels)->get_labels();
    Map<VectorXd> eigen_y(y.vector, y.vlen);
    SGVector<float64_t> m=m_mean->get_mean_vector(m_features);
    Map<VectorXd> eigen_m(m.vector, m.vlen);

    // compute negative log of the marginal likelihood:
    // nlZ=(y-m)'*alpha/2+sum(log(diag(L)))+n*log(2*pi*sigma^2)/2
    float64_t result=(eigen_y-eigen_m).dot(eigen_alpha)/2.0+
        eigen_L.diagonal().array().log().sum()+m_L.num_rows*
        CMath::log(2*CMath::PI*CMath::sq(sigma))/2.0;

    return result;
}

SGVector<float64_t> CExactInferenceMethod::get_alpha()
{
    if (parameter_hash_changed())
        update();

    return SGVector<float64_t>(m_alpha);
}

SGMatrix<float64_t> CExactInferenceMethod::get_cholesky()
{
    if (parameter_hash_changed())
        update();

    return SGMatrix<float64_t>(m_L);
}

SGVector<float64_t> CExactInferenceMethod::get_posterior_mean()
{
    if (parameter_hash_changed())
        update();

    return SGVector<float64_t>(m_mu);
}

SGMatrix<float64_t> CExactInferenceMethod::get_posterior_covariance()
{
    if (parameter_hash_changed())
        update();

    return SGMatrix<float64_t>(m_Sigma);
}

void CExactInferenceMethod::update_chol()
{
    // get the sigma variable from the Gaussian likelihood model
    CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
    float64_t sigma=lik->get_sigma();
    SG_UNREF(lik);

    /* check whether to allocate cholesky memory */
    if (!m_L.matrix || m_L.num_rows!=m_ktrtr.num_rows)
        m_L=SGMatrix<float64_t>(m_ktrtr.num_rows, m_ktrtr.num_cols);

    /* create views on kernel and cholesky matrices and perform cholesky
     * factorization of K*scale^2/sigma^2 + I */
    Map<MatrixXd> K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
    Map<MatrixXd> L(m_L.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
    LLT<MatrixXd> llt(K*(CMath::sq(m_scale)/CMath::sq(sigma))+
        MatrixXd::Identity(m_ktrtr.num_rows, m_ktrtr.num_cols));
    L=llt.matrixU();
}

void CExactInferenceMethod::update_alpha()
{
    // get the sigma variable from the Gaussian likelihood model
    CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
    float64_t sigma=lik->get_sigma();
    SG_UNREF(lik);

    // get labels and mean vector and create eigen representations
    SGVector<float64_t> y=((CRegressionLabels*) m_labels)->get_labels();
    Map<VectorXd> eigen_y(y.vector, y.vlen);
    SGVector<float64_t> m=m_mean->get_mean_vector(m_features);
    Map<VectorXd> eigen_m(m.vector, m.vlen);

    m_alpha=SGVector<float64_t>(y.vlen);

    /* create views on cholesky matrix and alpha and solve the system
     * (L * L^T) * a = y - m for a */
    Map<VectorXd> a(m_alpha.vector, m_alpha.vlen);
    Map<MatrixXd> L(m_L.matrix, m_L.num_rows, m_L.num_cols);

    a=L.triangularView<Upper>().adjoint().solve(eigen_y-eigen_m);
    a=L.triangularView<Upper>().solve(a);

    a/=CMath::sq(sigma);
}

void CExactInferenceMethod::update_mean()
{
    // create eigen representation of kernel matrix and alpha
    Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
    Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

    // get mean and create eigen representation of it
    SGVector<float64_t> m=m_mean->get_mean_vector(m_features);
    Map<VectorXd> eigen_m(m.vector, m.vlen);

    m_mu=SGVector<float64_t>(m.vlen);
    Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);

    // compute posterior mean: mu = scale^2 * K * alpha
    eigen_mu=eigen_K*CMath::sq(m_scale)*eigen_alpha;
}

void CExactInferenceMethod::update_cov()
{
    // create eigen representation of upper triangular factor L^T and kernel
    // matrix
    Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
    Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);

    m_Sigma=SGMatrix<float64_t>(m_ktrtr.num_rows, m_ktrtr.num_cols);
    Map<MatrixXd> eigen_Sigma(m_Sigma.matrix, m_Sigma.num_rows,
        m_Sigma.num_cols);

    // compute V = L^(-1) * K, using upper triangular factor L^T
    MatrixXd eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
        eigen_K*CMath::sq(m_scale));

    CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
    float64_t sigma=lik->get_sigma();
    SG_UNREF(lik);
    eigen_V=eigen_V/sigma;

    // compute covariance matrix of the posterior: Sigma = K - V^T * V
    eigen_Sigma=eigen_K*CMath::sq(m_scale)-eigen_V.adjoint()*eigen_V;
}

void CExactInferenceMethod::update_deriv()
{
    // get the sigma variable from the Gaussian likelihood model
    CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
    float64_t sigma=lik->get_sigma();
    SG_UNREF(lik);

    // create eigen representation of cholesky factor and alpha
    Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
    Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

    m_Q=SGMatrix<float64_t>(m_L.num_rows, m_L.num_cols);
    Map<MatrixXd> eigen_Q(m_Q.matrix, m_Q.num_rows, m_Q.num_cols);

    // solve L * L' * Q = I
    eigen_Q=eigen_L.triangularView<Upper>().adjoint().solve(
        MatrixXd::Identity(m_L.num_rows, m_L.num_cols));
    eigen_Q=eigen_L.triangularView<Upper>().solve(eigen_Q);

    // divide Q by sigma^2
    eigen_Q/=CMath::sq(sigma);

    // compute Q = Q - alpha * alpha'
    eigen_Q-=eigen_alpha*eigen_alpha.transpose();
}

SGVector<float64_t> CExactInferenceMethod::get_derivative_wrt_inference_method(
    const TParameter* param)
{
    REQUIRE(!strcmp(param->m_name, "scale"), "Can't compute derivative of "
        "the negative log marginal likelihood wrt %s.%s parameter\n",
        get_name(), param->m_name)

    Map<MatrixXd> eigen_K(m_ktrtr.matrix, m_ktrtr.num_rows, m_ktrtr.num_cols);
    Map<MatrixXd> eigen_Q(m_Q.matrix, m_Q.num_rows, m_Q.num_cols);

    SGVector<float64_t> result(1);

    // compute derivative wrt kernel scale: dnlZ=sum(Q.*K*scale*2)/2
    result[0]=(eigen_Q.cwiseProduct(eigen_K)*m_scale*2.0).sum()/2.0;

    return result;
}

CExactInferenceMethod* CExactInferenceMethod::obtain_from_generic(
    CInferenceMethod* inference)
{
    if (inference==NULL)
        return NULL;

    if (inference->get_inference_type()!=INF_EXACT)
        SG_SERROR("Provided inference is not of type CExactInferenceMethod!\n")

    SG_REF(inference);
    return (CExactInferenceMethod*)inference;
}

SGVector<float64_t> CExactInferenceMethod::get_derivative_wrt_likelihood_model(
    const TParameter* param)
{
    REQUIRE(!strcmp(param->m_name, "sigma"), "Can't compute derivative of "
        "the negative log marginal likelihood wrt %s.%s parameter\n",
        m_model->get_name(), param->m_name)

    // get the sigma variable from the Gaussian likelihood model
    CGaussianLikelihood* lik=CGaussianLikelihood::obtain_from_generic(m_model);
    float64_t sigma=lik->get_sigma();
    SG_UNREF(lik);

    // create eigen representation of the matrix Q
    Map<MatrixXd> eigen_Q(m_Q.matrix, m_Q.num_rows, m_Q.num_cols);

    SGVector<float64_t> result(1);

    // compute derivative wrt likelihood model parameter sigma:
    // dnlZ=sigma^2*trace(Q)
    result[0]=CMath::sq(sigma)*eigen_Q.trace();

    return result;
}

SGVector<float64_t> CExactInferenceMethod::get_derivative_wrt_kernel(
    const TParameter* param)
{
    // create eigen representation of the matrix Q
    Map<MatrixXd> eigen_Q(m_Q.matrix, m_Q.num_rows, m_Q.num_cols);

    REQUIRE(param, "Param not set\n");
    SGVector<float64_t> result;
    int64_t len=const_cast<TParameter*>(param)->m_datatype.get_num_elements();
    result=SGVector<float64_t>(len);

    for (index_t i=0; i<result.vlen; i++)
    {
        SGMatrix<float64_t> dK;

        if (result.vlen==1)
            dK=m_kernel->get_parameter_gradient(param);
        else
            dK=m_kernel->get_parameter_gradient(param, i);

        Map<MatrixXd> eigen_dK(dK.matrix, dK.num_rows, dK.num_cols);

        // compute derivative wrt kernel parameter: dnlZ=sum(Q.*dK*scale^2)/2.0
        result[i]=(eigen_Q.cwiseProduct(eigen_dK)*CMath::sq(m_scale)).sum()/2.0;
    }

    return result;
}

SGVector<float64_t> CExactInferenceMethod::get_derivative_wrt_mean(
    const TParameter* param)
{
    // create eigen representation of alpha vector
    Map<VectorXd> eigen_alpha(m_alpha.vector, m_alpha.vlen);

    REQUIRE(param, "Param not set\n");
    SGVector<float64_t> result;
    int64_t len=const_cast<TParameter*>(param)->m_datatype.get_num_elements();
    result=SGVector<float64_t>(len);

    for (index_t i=0; i<result.vlen; i++)
    {
        SGVector<float64_t> dmu;

        if (result.vlen==1)
            dmu=m_mean->get_parameter_derivative(m_features, param);
        else
            dmu=m_mean->get_parameter_derivative(m_features, param, i);

        Map<VectorXd> eigen_dmu(dmu.vector, dmu.vlen);

        // compute derivative wrt mean parameter: dnlZ=-dmu'*alpha
        result[i]=-eigen_dmu.dot(eigen_alpha);
    }

    return result;
}

#endif /* HAVE_EIGEN3 */
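
The listing above only defines the exact inference method itself. As a rough illustration of how it is typically wired into a GP regression setup, here is a minimal, hypothetical usage sketch; it is not part of ExactInferenceMethod.cpp, and the toy data, kernel arguments (cache size, width), noise level, and header paths are assumptions rather than something taken from this file.

// Minimal usage sketch (assumed setup, not part of ExactInferenceMethod.cpp)
#include <shogun/base/init.h>
#include <shogun/io/SGIO.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/RegressionLabels.h>
#include <shogun/kernel/GaussianKernel.h>
#include <shogun/machine/gp/ZeroMean.h>
#include <shogun/machine/gp/GaussianLikelihood.h>
#include <shogun/machine/gp/ExactInferenceMethod.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // toy 1D regression data with 5 training points (assumed values)
    SGMatrix<float64_t> X(1, 5);
    SGVector<float64_t> y(5);
    for (index_t i=0; i<5; i++)
    {
        X(0, i)=i;
        y[i]=0.1*i;
    }

    CDenseFeatures<float64_t>* feats=new CDenseFeatures<float64_t>(X);
    CRegressionLabels* labels=new CRegressionLabels(y);
    CGaussianKernel* kernel=new CGaussianKernel(10, 2.0); // cache size, width
    CZeroMean* mean=new CZeroMean();
    CGaussianLikelihood* lik=new CGaussianLikelihood(0.1); // noise sigma

    CExactInferenceMethod* inf=new CExactInferenceMethod(kernel, feats, mean,
        labels, lik);
    SG_REF(inf);

    // negative log marginal likelihood and Gaussian posterior over f
    float64_t nlZ=inf->get_negative_log_marginal_likelihood();
    SGVector<float64_t> mu=inf->get_posterior_mean();
    SGMatrix<float64_t> Sigma=inf->get_posterior_covariance();
    SG_SPRINT("nlZ=%f\n", nlZ);

    SG_UNREF(inf);
    exit_shogun();
    return 0;
}

For predictions on held-out inputs, such an inference object is normally handed to a GP regression machine (e.g. CGaussianProcessRegression) rather than queried directly.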