SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
NumericalVGLikelihood.cpp
Go to the documentation of this file.
/*
 * Copyright (c) The Shogun Machine Learning Toolbox
 * Written (w) 2014 Wu Lin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those
 * of the authors and should not be interpreted as representing official policies,
 * either expressed or implied, of the Shogun Development Team.
 *
 * Code adapted from
 * http://hannes.nickisch.org/code/approxXX.tar.gz
 * and Gaussian Process Machine Learning Toolbox
 * http://www.gaussianprocess.org/gpml/code/matlab/doc/
 *
 * and the reference paper is
 * Nickisch, Hannes, and Carl Edward Rasmussen.
 * "Approximations for Binary Gaussian Process Classification."
 * Journal of Machine Learning Research 9.10 (2008).
 *
 * This code specifically adapted from function in infKL.m
 */
41 
#include <shogun/machine/gp/NumericalVGLikelihood.h>

#ifdef HAVE_EIGEN3

#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/mathematics/eigen3.h>
#include <shogun/mathematics/Integration.h>

using namespace Eigen;
51 
52 namespace shogun
53 {
54 
55 CNumericalVGLikelihood::CNumericalVGLikelihood()
57 {
58  init();
59 }
60 
62 {
63 }
64 
65 void CNumericalVGLikelihood::init()
66 {
67  SG_ADD(&m_log_lam, "log_lam",
68  "The result of used for computing variational expection\n",
70 
71  SG_ADD(&m_xgh, "xgh",
72  "Gaussian-Hermite quadrature base points (abscissas)\n",
74 
75  SG_ADD(&m_wgh, "wgh",
76  "Gaussian-Hermite quadrature weight factors\n",
78 
79  SG_ADD(&m_GHQ_N, "GHQ_N",
80  "The number of Gaussian-Hermite quadrature point\n",
82 
83  SG_ADD(&m_is_init_GHQ, "is_init_GHQ",
84  "Whether Gaussian-Hermite quadrature points are initialized or not\n",
86  m_GHQ_N=20;
87  m_is_init_GHQ=false;
88 
89 }
90 
92 {
93  REQUIRE(n>0, "The number (%d) of Gaussian Hermite point should be positive\n",n);
94  if (m_GHQ_N!=n)
95  {
96  m_GHQ_N=n;
97  m_is_init_GHQ=false;
98  }
99 }
100 
102  const TParameter* param) const
103 {
104  REQUIRE(param, "Param is required (param should not be NULL)\n");
105  REQUIRE(param->m_name, "Param name is required (param->m_name should not be NULL)\n");
106  if (!(strcmp(param->m_name, "mu") && strcmp(param->m_name, "sigma2")))
107  return SGVector<float64_t> ();
108 
110  res.zero();
111  Map<VectorXd> eigen_res(res.vector, res.vlen);
112 
113  //ll = ll + w(i)*lp;
114  CLabels* lab=NULL;
115 
116  if (supports_binary())
117  lab=new CBinaryLabels(m_lab);
118  else if (supports_regression())
119  lab=new CRegressionLabels(m_lab);
120 
121  for (index_t cidx = 0; cidx < m_log_lam.num_cols; cidx++)
122  {
123  SGVector<float64_t> tmp(m_log_lam.get_column_vector(cidx), m_log_lam.num_rows, false);
124  SGVector<float64_t> lp=get_first_derivative(lab, tmp, param);
125  Map<VectorXd> eigen_lp(lp.vector, lp.vlen);
126  eigen_res+=eigen_lp*m_wgh[cidx];
127  }
128 
129  SG_UNREF(lab);
130 
131  return res;
132 }
133 
135 {
137  res.zero();
138  Map<VectorXd> eigen_res(res.vector, res.vlen);
139 
140  //ll = ll + w(i)*lp;
141  CLabels* lab=NULL;
142 
143  if (supports_binary())
144  lab=new CBinaryLabels(m_lab);
145  else if (supports_regression())
146  lab=new CRegressionLabels(m_lab);
147 
148  for (index_t cidx = 0; cidx < m_log_lam.num_cols; cidx++)
149  {
150  SGVector<float64_t> tmp(m_log_lam.get_column_vector(cidx), m_log_lam.num_rows, false);
152  Map<VectorXd> eigen_lp(lp.vector, lp.vlen);
153  eigen_res+=eigen_lp*m_wgh[cidx];
154  }
155 
156  SG_UNREF(lab);
157 
158  return res;
159 }
160 
162  const TParameter* param) const
163 {
164  //based on the likKL(v, lik, varargin) function in infKL.m
165 
166  //compute gradient using numerical integration
167  REQUIRE(param, "Param is required (param should not be NULL)\n");
168  REQUIRE(param->m_name, "Param name is required (param->m_name should not be NULL)\n");
169  //We take the derivative wrt to param. Only mu or sigma2 can be the param
170  REQUIRE(!(strcmp(param->m_name, "mu") && strcmp(param->m_name, "sigma2")),
171  "Can't compute derivative of the variational expection ",
172  "of log LogitLikelihood using numerical integration ",
173  "wrt %s.%s parameter. The function only accepts mu and sigma2 as parameter\n",
174  get_name(), param->m_name);
175 
177  res.zero();
178  Map<VectorXd> eigen_res(res.vector, res.vlen);
179 
180  Map<VectorXd> eigen_v(m_s2.vector, m_s2.vlen);
181 
182  CLabels* lab=NULL;
183 
184  if (supports_binary())
185  lab=new CBinaryLabels(m_lab);
186  else if (supports_regression())
187  lab=new CRegressionLabels(m_lab);
188 
189  if (strcmp(param->m_name, "mu")==0)
190  {
191  //Compute the derivative wrt mu
192 
193  //df = df + w(i)*(dlp);
194  for (index_t cidx=0; cidx<m_log_lam.num_cols; cidx++)
195  {
196  SGVector<float64_t> tmp(m_log_lam.get_column_vector(cidx), m_log_lam.num_rows, false);
198  Map<VectorXd> eigen_dlp(dlp.vector, dlp.vlen);
199  eigen_res+=eigen_dlp*m_wgh[cidx];
200  }
201  }
202  else
203  {
204  //Compute the derivative wrt sigma2
205 
206  //ai = t(i)./(2*sv+eps); dvi = dlp.*ai; dv = dv + w(i)*dvi;
207  VectorXd eigen_sv=eigen_v.array().sqrt().matrix();
208  const float64_t EPS=2.2204e-16;
209 
210  for (index_t cidx=0; cidx<m_log_lam.num_cols; cidx++)
211  {
212  SGVector<float64_t> tmp(m_log_lam.get_column_vector(cidx), m_log_lam.num_rows, false);
214  Map<VectorXd> eigen_dlp(dlp.vector, dlp.vlen);
215  eigen_res+=((m_wgh[cidx]*0.5*m_xgh[cidx])*eigen_dlp.array()/(eigen_sv.array()+EPS)).matrix();
216  }
217  }
218 
219  SG_UNREF(lab);
220 
221  return res;
222 }
223 
224 
226  SGVector<float64_t> s2, const CLabels* lab)
227 {
228  bool status = true;
230 
231  if (status)
232  {
233  if (supports_binary())
234  {
236  "Labels must be type of CBinaryLabels\n");
237  }
238  else
239  {
240  if (supports_regression())
241  {
243  "Labels must be type of CRegressionLabels\n");
244  }
245  else
246  SG_ERROR("Unsupported Label type\n");
247  }
248 
249  if (supports_binary())
250  m_lab=((CBinaryLabels*)lab)->get_labels();
251  else
252  m_lab=((CRegressionLabels*)lab)->get_labels();
253 
254  if (!m_is_init_GHQ)
255  {
256  m_xgh=SGVector<float64_t>(m_GHQ_N);
257  m_wgh=SGVector<float64_t>(m_GHQ_N);
258  CIntegration::generate_gauher(m_xgh, m_wgh);
259  m_is_init_GHQ=true;
260  }
261 
262  precompute();
263 
264  }
265 
266  return status;
267 }
268 
269 void CNumericalVGLikelihood::precompute()
270 {
271  //samples-by-abscissas
272  m_log_lam=SGMatrix<float64_t>(m_s2.vlen, m_xgh.vlen);
273 
274  Map<MatrixXd> eigen_log_lam(m_log_lam.matrix, m_log_lam.num_rows, m_log_lam.num_cols);
275  Map<VectorXd> eigen_v(m_s2.vector, m_s2.vlen);
276  Map<VectorXd> eigen_f(m_mu.vector, m_mu.vlen);
277  Map<VectorXd> eigen_t(m_xgh.vector, m_xgh.vlen);
278 
279  VectorXd eigen_sv=eigen_v.array().sqrt().matrix();
280  //varargin{3} = f + sv*t(i); % coordinate transform of the quadrature points
281  eigen_log_lam = (
282  (eigen_t.replicate(1, eigen_v.rows()).array().transpose().colwise()
283  *eigen_sv.array()).colwise()+eigen_f.array()
284  ).matrix();
285 }
286 
287 } /* namespace shogun */
288 
289 #endif /* HAVE_EIGEN3 */

SHOGUN Machine Learning Toolbox - Documentation