SHOGUN  v2.0.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
LaplacianInferenceMethod.h
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Copyright (C) 2012 Jacob Walker
8  *
9  * Code adapted from Gaussian Process Machine Learning Toolbox
10  * http://www.gaussianprocess.org/gpml/code/matlab/doc/
11  *
12  */
13 
14 #ifndef CLAPLACIANINFERENCEMETHOD_H_
15 #define CLAPLACIANINFERENCEMETHOD_H_
16 
17 #include <shogun/lib/config.h>
18 #ifdef HAVE_EIGEN3
19 #ifdef HAVE_LAPACK
20 
22 
23 namespace shogun
24 {
25 
48 {
49 
50 public:
51 
52  /*Default Constructor*/
54 
	/** Constructor
	 * @param kernel covariance function
	 * @param features features to use in inference
	 * @param mean mean function
	 * @param labels labels of the features
	 * @param model likelihood model to use
	 */
62  CLaplacianInferenceMethod(CKernel* kernel, CFeatures* features,
63  CMeanFunction* mean, CLabels* labels, CLikelihoodModel* model);
64 
65  /*Destructor*/
67 
78 
90 
100  virtual SGVector<float64_t> get_alpha();
101 
102 
114 
126 
132  inline virtual const char* get_name() const
133  {
134  return "LaplacianInferenceMethod";
135  }
136 
	/** Get the gradient
	 *
	 * @return map of the gradient: keys are names of parameters, values are
	 * the values of the derivative with respect to that parameter
	 */
144  {
145  return get_marginal_likelihood_derivatives(para_dict);
146  }
147 
	/** Get the function value
	 *
	 * @return vector that represents the function value
	 */
153  {
154  SGVector<float64_t> result(1);
155  result[0] = get_negative_marginal_likelihood();
156  return result;
157  }
158 
159  /*Get tolerance for Newton Iterations
160  *
161  * @return Tolerance for Newton Iterations
162  */
163  inline virtual float64_t get_newton_tolerance() {
164  return m_tolerance;}
165 
166  /*Set tolerance for Newton Iterations
167  *
168  * @param Tolerance for Newton Iterations
169  */
170  inline virtual void set_newton_tolerance(float64_t tol) {
171  m_tolerance = tol;}
172 
173  /*Get tolerance for Brent's Minimization Method
174  *
175  * @return tolerance for Brent's Minimization Method
176  */
178  return m_opt_tolerance;}
179 
180  /*Set tolerance for Brent's Minimization Method
181  *
182  * @param tolerance for Brent's Minimization Method
183  */
184  inline virtual void set_minimization_tolerance(float64_t tol) {
185  m_opt_tolerance = tol;}
186 
187  /*Get max iterations for Brent's Minimization Method
188  *
189  * @return max iterations for Brent's Minimization Method
190  */
191  inline virtual int32_t get_minimization_iterations() {
192  return m_max;}
193 
194  /*Set max iterations for Brent's Minimization Method
195  *
196  * @param max iterations for Brent's Minimization Method
197  */
198  inline virtual void set_minimization_tolerance(int32_t itr) {
199  m_max = itr;}
200 
201  /*Get max Newton iterations
202  *
203  * @return max Newton iterations
204  */
205  inline virtual int32_t get_newton_iterations() {
206  return m_max_itr;}
207 
208  /*Set max Newton iterations
209  *
210  * @param max Newton iterations
211  */
212  inline virtual void set_newton_tolerance(int32_t itr) {
213  m_max_itr = itr;}
214 
215 
216 
217 protected:
220  virtual void update_alpha();
221  virtual void update_chol();
222  virtual void update_train_kernel();
223  virtual void update_all();
224 
225 private:
226 
227  void init();
228 
229 private:
230 
234  void check_members();
235 
236  /*Amount of tolerance for Newton's Iterations*/
237  float64_t m_tolerance;
238 
239  /*Amount of tolerance for Brent's Minimization Method*/
240  float64_t m_opt_tolerance;
241 
242  /*Max iterations for Brent's Minimization Method*/
243  float64_t m_max;
244 
245  /*Max Newton Iterations*/
246  index_t m_max_itr;
247 
248  /*Kernel Matrix*/
249  SGMatrix<float64_t> temp_kernel;
250 
251  /*Eigen version of alpha vector*/
252  SGVector<float64_t> temp_alpha;
253 
254  /*Function Location*/
255  SGVector<float64_t> function;
256 
257  /*Noise Matrix*/
259 
260  /*Square root of W*/
262 
263  /*Eigen version of means vector*/
264  SGVector<float64_t> m_means;
265 
266  /*Derivative of log likelihood with respect
267  * to function location
268  */
270 
271  /*Second derivative of log likelihood with respect
272  * to function location
273  */
274  SGVector<float64_t> d2lp;
275 
276  /*Third derivative of log likelihood with respect
277  * to function location
278  */
279  SGVector<float64_t> d3lp;
280 
281  /*log likelihood*/
282  float64_t lp;
283 
284 };
285 
286 }
287 #endif // HAVE_EIGEN3
288 #endif // HAVE_LAPACK
289 
290 #endif /* CLAPLACIANINFERENCEMETHOD_H_ */

SHOGUN Machine Learning Toolbox - Documentation