KLInferenceMethod.h
/*
 * Copyright (c) The Shogun Machine Learning Toolbox
 * Written (w) 2014 Wu Lin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those
 * of the authors and should not be interpreted as representing official policies,
 * either expressed or implied, of the Shogun Development Team.
 *
 * Code adapted from
 * http://hannes.nickisch.org/code/approxXX.tar.gz
 * and Gaussian Process Machine Learning Toolbox
 * http://www.gaussianprocess.org/gpml/code/matlab/doc/
 * and the reference paper is
 * Nickisch, Hannes, and Carl Edward Rasmussen.
 * "Approximations for Binary Gaussian Process Classification."
 * Journal of Machine Learning Research 9.10 (2008).
 *
 */

#ifndef _KLINFERENCEMETHOD_H_
#define _KLINFERENCEMETHOD_H_

#include <shogun/lib/config.h>

#ifdef HAVE_EIGEN3

/* headers required by the declarations below: the base inference method class
 * and the L-BFGS parameter definitions (see shogun/optimization/lbfgs/lbfgs.h) */
#include <shogun/machine/gp/InferenceMethod.h>
#include <shogun/optimization/lbfgs/lbfgs.h>

namespace shogun
{

/** @brief The KL approximation inference method class, which approximates the
 * Gaussian process posterior with a Gaussian by minimizing the Kullback-Leibler
 * divergence to it (see the Nickisch and Rasmussen reference above).
 */
class CKLInferenceMethod: public CInferenceMethod
{
public:
	/** default constructor */
	CKLInferenceMethod();

	/** constructor taking the covariance function, features, mean function,
	 * labels and likelihood model used for inference
	 */
	CKLInferenceMethod(CKernel* kernel, CFeatures* features,
			CMeanFunction* mean, CLabels* labels, CLikelihoodModel* model);

	virtual ~CKLInferenceMethod();

	/** @return the inference type, INF_KL */
	virtual EInferenceType get_inference_type() const { return INF_KL; }

	/** @return the name KLInferenceMethod */
	virtual const char* get_name() const { return "KLInferenceMethod"; }

	/** @return whether the current likelihood model supports regression */
	virtual bool supports_regression() const
	{
		check_members();
		return m_model->supports_regression();
	}

	/** @return whether the current likelihood model supports binary classification */
	virtual bool supports_binary() const
	{
		check_members();
		return m_model->supports_binary();
	}

	/** set the likelihood model used for inference
	 * @param mod the likelihood model to use
	 */
	virtual void set_model(CLikelihoodModel* mod);

	/** recompute the posterior approximation and all dependent matrices */
	virtual void update();

	/** Set the L-BFGS parameters.
	 * For details please see shogun/optimization/lbfgs/lbfgs.h
	 * @param m The number of corrections to approximate the inverse Hessian matrix.
	 * Default value is 100.
	 * @param max_linesearch The maximum number of trials to do line search for each L-BFGS update.
	 * Default value is 1000.
	 * @param linesearch The line search algorithm.
	 * The default is the More-Thuente line search (LBFGS_LINESEARCH_DEFAULT).
	 * @param max_iterations The maximum number of iterations for L-BFGS update.
	 * Default value is 1000.
	 * @param delta Delta for convergence test based on the change of function value.
	 * Default value is 0.
	 * @param past Distance for delta-based convergence test.
	 * Default value is 0.
	 * @param epsilon Epsilon for convergence test based on the change of gradient.
	 * Default value is 1e-5.
	 * @param min_step The minimum step of the line search.
	 * The default value is 1e-20.
	 * @param max_step The maximum step of the line search.
	 * The default value is 1e+20.
	 * @param ftol A parameter used in the Armijo condition.
	 * Default value is 1e-4.
	 * @param wolfe A parameter used in the curvature condition.
	 * Default value is 0.9.
	 * @param gtol A parameter used in the More-Thuente line search to control the accuracy.
	 * Default value is 0.9.
	 * @param xtol The machine precision for floating-point values.
	 * Default value is 1e-16.
	 * @param orthantwise_c Coefficient for the L1 norm of variables.
	 * This parameter should be set to zero for standard minimization problems.
	 * Setting this parameter to a positive value activates the
	 * Orthant-Wise Limited-memory Quasi-Newton (OWL-QN) method. Default value is 0.
	 * @param orthantwise_start Start index for computing the L1 norm of the variables.
	 * This parameter is valid only for the OWL-QN method. Default value is 0.
	 * @param orthantwise_end End index for computing the L1 norm of the variables.
	 * Default value is 1.
	 */
	virtual void set_lbfgs_parameters(int m = 100,
			int max_linesearch = 1000,
			int linesearch = LBFGS_LINESEARCH_DEFAULT,
			int max_iterations = 1000,
			float64_t delta = 0.0,
			int past = 0,
			float64_t epsilon = 1e-5,
			float64_t min_step = 1e-20,
			float64_t max_step = 1e+20,
			float64_t ftol = 1e-4,
			float64_t wolfe = 0.9,
			float64_t gtol = 0.9,
			float64_t xtol = 1e-16,
			float64_t orthantwise_c = 0.0,
			int orthantwise_start = 0,
			int orthantwise_end = 1);

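	/* Illustrative sketch (not part of the original header, hypothetical helper):
	 * a typical call that tightens the gradient tolerance and caps the iteration
	 * count while keeping the remaining parameters at their defaults.
	 *
	 * void configure_lbfgs(CKLInferenceMethod* inf)
	 * {
	 *     inf->set_lbfgs_parameters(100,        // corrections for the inverse Hessian approximation
	 *         1000,                             // max_linesearch
	 *         LBFGS_LINESEARCH_DEFAULT,         // line search algorithm (More-Thuente)
	 *         500,                              // max_iterations
	 *         0.0, 0,                           // delta, past: value-based convergence test disabled
	 *         1e-6);                            // epsilon: gradient convergence tolerance
	 * }
	 */
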
	/** set the noise factor added to the kernel matrix diagonal to keep its
	 * factorization numerically stable
	 */
	virtual void set_noise_factor(float64_t noise_factor);

	/** set the maximum number of attempts at factorizing the kernel matrix */
	virtual void set_max_attempt(index_t max_attempt);

	/** set the factor by which the noise factor grows after a failed factorization attempt */
	virtual void set_exp_factor(float64_t exp_factor);

	/** set the minimum coefficient allowed on the kernel matrix diagonal */
	virtual void set_min_coeff_kernel(float64_t min_coeff_kernel);

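	/* Illustrative sketch (not part of the original header, hypothetical helper):
	 * make the kernel factorization more forgiving for ill-conditioned kernels by
	 * starting from a small diagonal jitter that is grown tenfold on each retry.
	 *
	 * void relax_factorization(CKLInferenceMethod* inf)
	 * {
	 *     inf->set_noise_factor(1e-10); // initial jitter added to the kernel diagonal
	 *     inf->set_exp_factor(10.0);    // grow the jitter after each failed attempt
	 *     inf->set_max_attempt(10);     // give up after 10 attempts
	 * }
	 */
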
protected:

	/** the noise factor added to the kernel matrix diagonal (see set_noise_factor) */
	float64_t m_noise_factor;

	/** the maximum number of factorization attempts (see set_max_attempt) */
	index_t m_max_attempt;

	/** the growth factor applied to the noise factor (see set_exp_factor) */
	float64_t m_exp_factor;

	/** the minimum coefficient allowed on the kernel matrix diagonal (see set_min_coeff_kernel) */
	float64_t m_min_coeff_kernel;

	/** initialize the update by factorizing the kernel matrix */
	virtual void update_init();

	/** helper that factorizes the kernel matrix, adding noise to its diagonal if required */
	virtual Eigen::LDLT<Eigen::MatrixXd> update_init_helper();

	/** check that the supplied likelihood model is a variational likelihood usable by KL inference */
	virtual void check_variational_likelihood(CLikelihoodModel* mod) const;

	/** update the approximate covariance matrix */
	virtual void update_approx_cov()=0;

	/** compute the derivative term related to the covariance, given a kernel derivative matrix dK */
	virtual float64_t get_derivative_related_cov(Eigen::MatrixXd eigen_dK)=0;

	/** optimize the variational objective using L-BFGS and return its final value */
	virtual float64_t lbfgs_optimization();

	/** @return the derivative of the negative log marginal likelihood wrt a parameter
	 * of the inference method
	 */
	virtual SGVector<float64_t> get_derivative_wrt_inference_method(
			const TParameter* param);

	/** @return the derivative of the negative log marginal likelihood wrt a parameter
	 * of the likelihood model
	 */
	virtual SGVector<float64_t> get_derivative_wrt_likelihood_model(
			const TParameter* param);

	/** @return the derivative of the negative log marginal likelihood wrt a kernel parameter */
	virtual SGVector<float64_t> get_derivative_wrt_kernel(
			const TParameter* param);

	/** @return the derivative of the negative log marginal likelihood wrt a mean function parameter */
	virtual SGVector<float64_t> get_derivative_wrt_mean(
			const TParameter* param);

	/** precompute quantities needed by the L-BFGS evaluate callback
	 * @return whether the precomputation succeeded
	 */
	virtual bool lbfgs_precompute()=0;

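	/* Illustrative sketch (not part of the original header): some of the pure
	 * virtual hooks visible in this header that a concrete subclass provides.
	 * The class and the stubbed bodies below are hypothetical.
	 *
	 * class CMyKLInferenceMethod: public CKLInferenceMethod
	 * {
	 * protected:
	 *     virtual void update_approx_cov()
	 *     {
	 *         // fill the approximate posterior covariance from the current variational parameters
	 *     }
	 *     virtual float64_t get_derivative_related_cov(Eigen::MatrixXd eigen_dK)
	 *     {
	 *         return 0.0; // combine dK with the approximate covariance; stubbed out here
	 *     }
	 *     virtual bool lbfgs_precompute()
	 *     {
	 *         return true; // cache whatever the evaluate callback needs; report success
	 *     }
	 * };
	 */
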
	/* The number of corrections to approximate the inverse Hessian matrix. */
	int m_m;

	/* The maximum number of trials to do line search for each L-BFGS update. */
	int m_max_linesearch;

	/* The line search algorithm. */
	int m_linesearch;

	/* The maximum number of iterations for L-BFGS update. */
	int m_max_iterations;

	/* Delta for convergence test based on the change of function value. */
	float64_t m_delta;

	/* Distance for delta-based convergence test. */
	int m_past;

	/* Epsilon for convergence test based on the change of gradient. */
	float64_t m_epsilon;

	/* The minimum step of the line search. */
	float64_t m_min_step;

	/* The maximum step of the line search. */
	float64_t m_max_step;

	/* A parameter used in the Armijo condition. */
	float64_t m_ftol;

	/* A parameter used in the curvature condition. */
	float64_t m_wolfe;

	/* A parameter used in the More-Thuente line search to control the accuracy. */
	float64_t m_gtol;

	/* The machine precision for floating-point values. */
	float64_t m_xtol;

	/* Coefficient for the L1 norm of variables. */
	float64_t m_orthantwise_c;

	/* Start index for computing the L1 norm of the variables. */
	int m_orthantwise_start;

	/* End index for computing the L1 norm of the variables. */
	int m_orthantwise_end;

private:
	void init();

	/** evaluation callback passed to the L-BFGS optimizer: computes the value
	 * of the objective and its gradient at the supplied parameters
	 */
	static float64_t evaluate(void *obj,
			const float64_t *parameters,
			float64_t *gradient,
			const int dim,
			const float64_t step);
};
}
#endif /* HAVE_EIGEN3 */
#endif /* _KLINFERENCEMETHOD_H_ */
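
/* Illustrative usage sketch (not part of this header). It assumes a concrete KL
 * inference subclass (CKLCholeskyInferenceMethod) and a variational likelihood
 * (CLogitVGLikelihood) are available in the same Shogun build, together with the
 * usual feature/label/kernel/mean classes; treat those class names and header
 * paths as assumptions rather than guarantees.
 *
 * #include <shogun/features/DenseFeatures.h>
 * #include <shogun/labels/BinaryLabels.h>
 * #include <shogun/kernel/GaussianKernel.h>
 * #include <shogun/machine/gp/ZeroMean.h>
 * #include <shogun/machine/gp/LogitVGLikelihood.h>
 * #include <shogun/machine/gp/KLCholeskyInferenceMethod.h>
 *
 * using namespace shogun;
 *
 * float64_t kl_nlml(SGMatrix<float64_t> X, SGVector<float64_t> y)
 * {
 *     CDenseFeatures<float64_t>* feats = new CDenseFeatures<float64_t>(X);
 *     CBinaryLabels* labels = new CBinaryLabels(y);
 *     CGaussianKernel* kernel = new CGaussianKernel(10, 2.0); // cache size, width
 *     CZeroMean* mean = new CZeroMean();
 *     CLogitVGLikelihood* lik = new CLogitVGLikelihood();
 *
 *     // KL inference: fit a Gaussian approximation to the posterior by
 *     // minimizing the KL divergence to it with L-BFGS
 *     CKLCholeskyInferenceMethod* inf = new CKLCholeskyInferenceMethod(
 *         kernel, feats, mean, labels, lik);
 *     inf->set_lbfgs_parameters(); // keep the default L-BFGS settings
 *
 *     float64_t nlml = inf->get_negative_log_marginal_likelihood();
 *     SG_UNREF(inf);
 *     return nlml;
 * }
 */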
