SHOGUN  4.2.0
LogitVGPiecewiseBoundLikelihood.cpp
/*
 * Copyright (c) The Shogun Machine Learning Toolbox
 * Written (w) 2014 Wu Lin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those
 * of the authors and should not be interpreted as representing official policies,
 * either expressed or implied, of the Shogun Development Team.
 *
 * Code adapted from
 * https://github.com/emtiyaz/VariationalApproxExample
 * and the reference paper is
 * Marlin, Benjamin M., Mohammad Emtiyaz Khan, and Kevin P. Murphy.
 * "Piecewise Bounds for Estimating Bernoulli-Logistic Latent Gaussian Models." ICML. 2011.
 *
 * This code is specifically adapted from ElogLik.m
 * and from the formula of the appendix
 * http://www.cs.ubc.ca/~emtiyaz/papers/truncatedGaussianMoments.pdf
 */
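
// Overview of the computation below (a summary of the bound referenced above;
// notation follows ElogLik.m): the logistic log-partition function is bounded
// by a fixed piecewise quadratic with R = 20 pieces,
//
//   log(1 + exp(x)) <= sum_r (a_r*x^2 + b_r*x + c_r) * 1[l_r < x <= h_r],
//
// so its expectation under the variational Gaussian q(x) = N(x | mu, sigma^2)
// reduces to zeroth/first/second moments of x restricted to each interval
// (l_r, h_r], i.e. truncated Gaussian moments. These moments are what
// precompute() and get_variational_expection() assemble below.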

// Includes implied by the symbols used below (paths follow the usual Shogun source layout).
#include <shogun/machine/gp/LogitVGPiecewiseBoundLikelihood.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/Statistics.h>
#include <shogun/mathematics/eigen3.h>

using namespace Eigen;

namespace shogun
{

CLogitVGPiecewiseBoundLikelihood::CLogitVGPiecewiseBoundLikelihood()
	: CLogitVGLikelihood()
{
	init();
}

CLogitVGPiecewiseBoundLikelihood::~CLogitVGPiecewiseBoundLikelihood()
{
}

void CLogitVGPiecewiseBoundLikelihood::set_variational_bound(SGMatrix<float64_t> bound)
{
	m_bound = bound;
}

void CLogitVGPiecewiseBoundLikelihood::set_default_variational_bound()
{
	index_t row = 20;
	index_t col = 5;
	m_bound = SGMatrix<float64_t>(row, col);

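	// Layout of the default bound matrix (one row per piece, 20 pieces):
	//   column 0: constant coefficients  c_r
	//   column 1: linear coefficients    b_r
	//   column 2: quadratic coefficients a_r
	//   column 3: lower interval edges   l_r (first edge is -infinity)
	//   column 4: upper interval edges   h_r (last edge is +infinity)
	// This matches how the columns are read back via get_column_vector(0..4)
	// in get_variational_expection() and precompute().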
	m_bound(0,0)=0.000188712193000;
	m_bound(1,0)=0.028090310300000;
	m_bound(2,0)=0.110211757000000;
	m_bound(3,0)=0.232736440000000;
	m_bound(4,0)=0.372524706000000;
	m_bound(5,0)=0.504567936000000;
	m_bound(6,0)=0.606280283000000;
	m_bound(7,0)=0.666125432000000;
	m_bound(8,0)=0.689334264000000;
	m_bound(9,0)=0.693147181000000;
	m_bound(10,0)=0.693147181000000;
	m_bound(11,0)=0.689334264000000;
	m_bound(12,0)=0.666125432000000;
	m_bound(13,0)=0.606280283000000;
	m_bound(14,0)=0.504567936000000;
	m_bound(15,0)=0.372524706000000;
	m_bound(16,0)=0.232736440000000;
	m_bound(17,0)=0.110211757000000;
	m_bound(18,0)=0.028090310400000;
	m_bound(19,0)=0.000188712000000;

	m_bound(0,1)=0;
	m_bound(1,1)=0.006648614600000;
	m_bound(2,1)=0.034432684600000;
	m_bound(3,1)=0.088701969900000;
	m_bound(4,1)=0.168024214000000;
	m_bound(5,1)=0.264032863000000;
	m_bound(6,1)=0.360755794000000;
	m_bound(7,1)=0.439094482000000;
	m_bound(8,1)=0.485091758000000;
	m_bound(9,1)=0.499419205000000;
	m_bound(10,1)=0.500580795000000;
	m_bound(11,1)=0.514908242000000;
	m_bound(12,1)=0.560905518000000;
	m_bound(13,1)=0.639244206000000;
	m_bound(14,1)=0.735967137000000;
	m_bound(15,1)=0.831975786000000;
	m_bound(16,1)=0.911298030000000;
	m_bound(17,1)=0.965567315000000;
	m_bound(18,1)=0.993351385000000;
	m_bound(19,1)=1.000000000000000;

	m_bound(0,2)=0;
	m_bound(1,2)=0.000397791059000;
	m_bound(2,2)=0.002753100850000;
	m_bound(3,2)=0.008770186980000;
	m_bound(4,2)=0.020034759300000;
	m_bound(5,2)=0.037511596000000;
	m_bound(6,2)=0.060543032900000;
	m_bound(7,2)=0.086256780600000;
	m_bound(8,2)=0.109213531000000;
	m_bound(9,2)=0.123026104000000;
	m_bound(10,2)=0.123026104000000;
	m_bound(11,2)=0.109213531000000;
	m_bound(12,2)=0.086256780600000;
	m_bound(13,2)=0.060543032900000;
	m_bound(14,2)=0.037511596000000;
	m_bound(15,2)=0.020034759300000;
	m_bound(16,2)=0.008770186980000;
	m_bound(17,2)=0.002753100850000;
	m_bound(18,2)=0.000397791059000;
	m_bound(19,2)=0;

	m_bound(0,3)=-CMath::INFTY;
	m_bound(1,3)=-8.575194939999999;
	m_bound(2,3)=-5.933689180000000;
	m_bound(3,3)=-4.525933600000000;
	m_bound(4,3)=-3.528107790000000;
	m_bound(5,3)=-2.751548540000000;
	m_bound(6,3)=-2.097898790000000;
	m_bound(7,3)=-1.519690830000000;
	m_bound(8,3)=-0.989533382000000;
	m_bound(9,3)=-0.491473077000000;
	m_bound(10,3)=0;
	m_bound(11,3)=0.491473077000000;
	m_bound(12,3)=0.989533382000000;
	m_bound(13,3)=1.519690830000000;
	m_bound(14,3)=2.097898790000000;
	m_bound(15,3)=2.751548540000000;
	m_bound(16,3)=3.528107790000000;
	m_bound(17,3)=4.525933600000000;
	m_bound(18,3)=5.933689180000000;
	m_bound(19,3)=8.575194939999999;

	m_bound(0,4)=-8.575194939999999;
	m_bound(1,4)=-5.933689180000000;
	m_bound(2,4)=-4.525933600000000;
	m_bound(3,4)=-3.528107790000000;
	m_bound(4,4)=-2.751548540000000;
	m_bound(5,4)=-2.097898790000000;
	m_bound(6,4)=-1.519690830000000;
	m_bound(7,4)=-0.989533382000000;
	m_bound(8,4)=-0.491473077000000;
	m_bound(9,4)=0;
	m_bound(10,4)=0.491473077000000;
	m_bound(11,4)=0.989533382000000;
	m_bound(12,4)=1.519690830000000;
	m_bound(13,4)=2.097898790000000;
	m_bound(14,4)=2.751548540000000;
	m_bound(15,4)=3.528107790000000;
	m_bound(16,4)=4.525933600000000;
	m_bound(17,4)=5.933689180000000;
	m_bound(18,4)=8.575194939999999;
	m_bound(19,4)=CMath::INFTY;
}

SGVector<float64_t> CLogitVGPiecewiseBoundLikelihood::get_variational_expection()
{
	//This function is based on the Matlab code,
	//function [f, gm, gv] = Ellp(m, v, bound, ind), to compute f
	//and the formula of the appendix

	const Map<VectorXd> eigen_c(m_bound.get_column_vector(0), m_bound.num_rows);
	const Map<VectorXd> eigen_b(m_bound.get_column_vector(1), m_bound.num_rows);
	const Map<VectorXd> eigen_a(m_bound.get_column_vector(2), m_bound.num_rows);
	const Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
	const Map<VectorXd> eigen_s2(m_s2.vector, m_s2.vlen);

	Map<VectorXd> eigen_l(m_bound.get_column_vector(3), m_bound.num_rows);
	Map<VectorXd> eigen_h(m_bound.get_column_vector(4), m_bound.num_rows);

	index_t num_rows = m_bound.num_rows;
	index_t num_cols = m_mu.vlen;

	const Map<MatrixXd> eigen_cdf_diff(m_cdf_diff.matrix, num_rows, num_cols);
	const Map<MatrixXd> eigen_pl(m_pl.matrix, num_rows, num_cols);
	const Map<MatrixXd> eigen_ph(m_ph.matrix, num_rows, num_cols);

	float64_t l_bak = eigen_l(0);
	//l(1) = 0;
	eigen_l(0) = 0;

	float64_t h_bak = eigen_h(eigen_h.size()-1);
	//h(end) = 0;
	eigen_h(eigen_h.size()-1) = 0;

	//ex0 = ch-cl;
	const Map<MatrixXd>& eigen_ex0 = eigen_cdf_diff;

	//%ex1 = v.*(pl-ph) + m.*(ch-cl);
	MatrixXd eigen_ex1 = ((eigen_pl - eigen_ph).array().rowwise()*eigen_s2.array().transpose()
		+ eigen_cdf_diff.array().rowwise()*eigen_mu.array().transpose()).matrix();

	//ex2 = bsxfun(@times, v, (bsxfun(@plus, l, m)).*pl - (bsxfun(@plus, h, m)).*ph) + bsxfun(@times, (v+m.^2), ex0);

	//(bsxfun(@plus, l, m)).*pl - (bsxfun(@plus, h, m)).*ph
	MatrixXd eigen_ex2 = ((eigen_mu.replicate(1,eigen_l.rows()).array().transpose().colwise() + eigen_l.array())*eigen_pl.array()
		- (eigen_mu.replicate(1,eigen_h.rows()).array().transpose().colwise() + eigen_h.array())*eigen_ph.array()).matrix();

	//bsxfun(@times, v, bsxfun(@plus, l, m).*pl - bsxfun(@plus, h, m).*ph)
	eigen_ex2 = (eigen_ex2.array().rowwise()*eigen_s2.array().transpose()).matrix();

	//ex2 = bsxfun(@times, v, (bsxfun(@plus, l, m)).*pl - (bsxfun(@plus, h, m)).*ph) + bsxfun(@times, (v+m.^2), ex0);
	eigen_ex2 += (eigen_cdf_diff.array().rowwise()*(eigen_mu.array().pow(2)
		+ eigen_s2.array()).transpose()).matrix();

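	// ex0, ex1 and ex2 are, per piece r and data point i, the truncated moments
	//   ex0(r,i) = E_q[ 1[x in (l_r, h_r]] ],
	//   ex1(r,i) = E_q[ x   * 1[x in (l_r, h_r]] ],
	//   ex2(r,i) = E_q[ x^2 * 1[x in (l_r, h_r]] ],
	// with q = N(mu_i, s2_i), assembled from the truncated-Gaussian moment
	// formulas of the appendix referenced in the header comment.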
	SGVector<float64_t> f(m_mu.vlen);
	Map<VectorXd> eigen_f(f.vector, f.vlen);

	//%f = sum((a.*ex2 + b.*ex1 + c.*ex0),1);
	eigen_f = (eigen_ex2.array().colwise()*eigen_a.array()
		+ eigen_ex1.array().colwise()*eigen_b.array()
		+ eigen_ex0.array().colwise()*eigen_c.array()).colwise().sum().matrix();

	eigen_l(0) = l_bak;
	eigen_h(eigen_h.size()-1) = h_bak;

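	// For labels y in {0,1}, log p(y|x) = y*x - log(1+exp(x)), so the expected
	// log-likelihood is E_q[y*x] - E_q[log(1+exp(x))] = y*mu - E_q[log(1+exp(x))];
	// replacing the last term by the piecewise bound accumulated in f above
	// gives the variational lower bound returned here.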
	Map<VectorXd> eigen_lab(m_lab.vector, m_lab.vlen);
	eigen_f = eigen_lab.cwiseProduct(eigen_mu) - eigen_f;
	return f;
}

SGVector<float64_t> CLogitVGPiecewiseBoundLikelihood::get_variational_first_derivative(
	const TParameter* param) const
{
	//This function is based on the Matlab code
	//function [f, gm, gv] = Ellp(m, v, bound, ind), to compute gm and gv
	//and the formula of the appendix
	REQUIRE(param, "Param is required (param should not be NULL)\n");
	REQUIRE(param->m_name, "Param name is required (param->m_name should not be NULL)\n");
	//We take the derivative wrt param. Only mu or sigma2 can be the param
	REQUIRE(!(strcmp(param->m_name, "mu") && strcmp(param->m_name, "sigma2")),
		"Can't compute derivative of the variational expectation "
		"of log LogitLikelihood using the piecewise bound "
		"wrt %s.%s parameter. The function only accepts mu and sigma2 as parameter",
		get_name(), param->m_name);

	const Map<VectorXd> eigen_c(m_bound.get_column_vector(0), m_bound.num_rows);
	const Map<VectorXd> eigen_b(m_bound.get_column_vector(1), m_bound.num_rows);
	const Map<VectorXd> eigen_a(m_bound.get_column_vector(2), m_bound.num_rows);
	const Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
	const Map<VectorXd> eigen_s2(m_s2.vector, m_s2.vlen);

	Map<VectorXd> eigen_l(m_bound.get_column_vector(3), m_bound.num_rows);
	Map<VectorXd> eigen_h(m_bound.get_column_vector(4), m_bound.num_rows);

	index_t num_rows = m_bound.num_rows;
	index_t num_cols = m_mu.vlen;

	const Map<MatrixXd> eigen_cdf_diff(m_cdf_diff.matrix, num_rows, num_cols);
	const Map<MatrixXd> eigen_pl(m_pl.matrix, num_rows, num_cols);
	const Map<MatrixXd> eigen_ph(m_ph.matrix, num_rows, num_cols);
	const Map<MatrixXd> eigen_weighted_pdf_diff(m_weighted_pdf_diff.matrix, num_rows, num_cols);
	const Map<MatrixXd> eigen_h2_plus_s2(m_h2_plus_s2.matrix, num_rows, num_cols);
	const Map<MatrixXd> eigen_l2_plus_s2(m_l2_plus_s2.matrix, num_rows, num_cols);

	float64_t l_bak = eigen_l(0);
	//l(1) = 0;
	eigen_l(0) = 0;
	float64_t h_bak = eigen_h(eigen_h.size()-1);
	//h(end) = 0;
	eigen_h(eigen_h.size()-1) = 0;

	SGVector<float64_t> result(m_mu.vlen);

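	// Since E_q[log p(y|x)] is approximated by y*mu - E_q[bound(x)], the two
	// branches below return, respectively,
	//   gmu = y - d/dmu E_q[bound(x)]   (for param "mu"), and
	//   gs2 =   - d/ds2 E_q[bound(x)]   (for param "sigma2"),
	// differentiating the truncated-moment expressions piece by piece.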
	if (strcmp(param->m_name, "mu") == 0)
	{
		//Compute the derivative wrt mu

		//bsxfun(@plus, bsxfun(@plus, l.^2, v), v).*pl - bsxfun(@plus, bsxfun(@plus, h.^2, v), v).*ph;
		MatrixXd eigen_dmu2 = ((eigen_l2_plus_s2.array().rowwise()+eigen_s2.array().transpose())*eigen_pl.array()
			- (eigen_h2_plus_s2.array().rowwise()+eigen_s2.array().transpose())*eigen_ph.array()).matrix();
		//bsxfun(@times, ch-cl, (2.0*mu))
		eigen_dmu2 += (eigen_cdf_diff.array().rowwise()*(2.0*eigen_mu).array().transpose()).matrix();

		SGVector<float64_t>& gmu = result;
		Map<VectorXd> eigen_gmu(gmu.vector, gmu.vlen);

		//gmu = bsxfun(@times, dmu2, a) + bsxfun(@times, pl.*l - ph.*h + ch - cl, b) + bsxfun(@times, pl - ph, c)
		//gmu = sum(gmu,1)
		eigen_gmu = ((eigen_dmu2.array().colwise()*eigen_a.array())
			+ ((eigen_weighted_pdf_diff + eigen_cdf_diff).array().colwise()*eigen_b.array())
			+ ((eigen_pl - eigen_ph).array().colwise()*eigen_c.array())).colwise().sum().matrix();

		Map<VectorXd> eigen_lab(m_lab.vector, m_lab.vlen);
		eigen_gmu = eigen_lab - eigen_gmu;
	}
	else
	{
		//Compute the derivative wrt sigma2

		//gv_0 = bsxfun(@plus, l, -mu).*pl - bsxfun(@plus, h, -mu).*ph;
		MatrixXd eigen_gs2_0 = (((-eigen_mu).replicate(1,eigen_l.rows()).array().transpose().colwise() + eigen_l.array())*eigen_pl.array()
			- ((-eigen_mu).replicate(1,eigen_h.rows()).array().transpose().colwise() + eigen_h.array())*eigen_ph.array()).matrix();
		//gv_0 = bsxfun(@times, gv_0, c);
		eigen_gs2_0 = (eigen_gs2_0.array().colwise()*eigen_c.array()).matrix();

		//tmpl = (l2_plus_v - bsxfun(@times, l, mu)).*pl
		MatrixXd tmpl = (eigen_l2_plus_s2 - (eigen_mu.replicate(1,eigen_l.rows()).array().transpose().colwise()*eigen_l.array()).matrix()
			).cwiseProduct(eigen_pl);

		//tmph = (h2_plus_v - bsxfun(@times, h, mu)).*ph
		MatrixXd tmph = (eigen_h2_plus_s2 - (eigen_mu.replicate(1,eigen_h.rows()).array().transpose().colwise()*eigen_h.array()).matrix()
			).cwiseProduct(eigen_ph);

		//gv_1 = bsxfun(@times, tmpl - tmph, b);
		MatrixXd eigen_gs2_1 = ((tmpl - tmph).array().colwise()*eigen_b.array()).matrix();

		//gv_2 = bsxfun(@times, tmpl, l) - bsxfun(@times, tmph, h);
		MatrixXd eigen_gs2_2 = (tmpl.array().colwise()*eigen_l.array() - tmph.array().colwise()*eigen_h.array()).matrix();

		//gv_2 = bsxfun(@times, gv_2, a);
		eigen_gs2_2 = (eigen_gs2_2.array().colwise()*eigen_a.array()).matrix();

		SGVector<float64_t>& gs2 = result;
		Map<VectorXd> eigen_gs2(gs2.vector, gs2.vlen);

		//gv = bsxfun(@times, ch - cl + 0.5*(pl.*l - ph.*h), a) + bsxfun(@times, gv_0 + gv_1 + gv_2, 1.0/(2.0*v))
		//gv = sum(gv,1);
		eigen_gs2 = ((eigen_cdf_diff + 0.5*eigen_weighted_pdf_diff).array().colwise()*eigen_a.array()
			+ (eigen_gs2_0 + eigen_gs2_1 + eigen_gs2_2).array().rowwise()/(2.0*eigen_s2).array().transpose()
			).colwise().sum().matrix();

		//gv = -gv
		eigen_gs2 = -eigen_gs2;
	}
	eigen_l(0) = l_bak;
	eigen_h(eigen_h.size()-1) = h_bak;

	return result;
}

bool CLogitVGPiecewiseBoundLikelihood::set_variational_distribution(SGVector<float64_t> mu,
	SGVector<float64_t> s2, const CLabels* lab)
{
	bool status = true;
	status = CVariationalGaussianLikelihood::set_variational_distribution(mu, s2, lab);
	if (status)
	{
		REQUIRE(lab->get_label_type()==LT_BINARY,
			"Labels must be type of CBinaryLabels\n");

		m_lab = (((CBinaryLabels*)lab)->get_labels()).clone();

		//Convert the input labels to the standard labels used in this class.
		//Note that Shogun uses -1 and 1 as labels and this class internally uses
		//0 and 1 respectively.
		for (index_t i = 0; i < m_lab.size(); ++i)
			m_lab[i] = CMath::max(m_lab[i], 0.0);

		precompute();
	}
	return status;
}

void CLogitVGPiecewiseBoundLikelihood::init_likelihood()
{
	set_likelihood(new CLogitLikelihood());
}

void CLogitVGPiecewiseBoundLikelihood::init()
{
	SG_ADD(&m_bound, "bound",
		"Variational piecewise bound for logit likelihood",
		MS_NOT_AVAILABLE);
	SG_ADD(&m_pl, "pdf_l",
		"The pdf given the lower range and parameters(mu and variance)",
		MS_NOT_AVAILABLE);
	SG_ADD(&m_ph, "pdf_h",
		"The pdf given the higher range and parameters(mu and variance)",
		MS_NOT_AVAILABLE);
	SG_ADD(&m_cdf_diff, "cdf_h_minus_cdf_l",
		"The CDF difference between the lower and higher range given the parameters(mu and variance)",
		MS_NOT_AVAILABLE);
	SG_ADD(&m_l2_plus_s2, "l2_plus_sigma2",
		"The result of l^2 + sigma^2",
		MS_NOT_AVAILABLE);
	SG_ADD(&m_h2_plus_s2, "h2_plus_sigma2",
		"The result of h^2 + sigma^2",
		MS_NOT_AVAILABLE);
	SG_ADD(&m_weighted_pdf_diff, "weighted_pdf_diff",
		"The result of l*pdf(l_norm)-h*pdf(h_norm)",
		MS_NOT_AVAILABLE);

	init_likelihood();
}

void CLogitVGPiecewiseBoundLikelihood::precompute()
{
	//This function is based on the Matlab code
	//function [f, gm, gv] = Ellp(m, v, bound, ind), to compute common variables later
	//used in get_variational_expection and get_variational_first_derivative

	const Map<VectorXd> eigen_c(m_bound.get_column_vector(0), m_bound.num_rows);
	const Map<VectorXd> eigen_b(m_bound.get_column_vector(1), m_bound.num_rows);
	const Map<VectorXd> eigen_a(m_bound.get_column_vector(2), m_bound.num_rows);
	const Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
	const Map<VectorXd> eigen_s2(m_s2.vector, m_s2.vlen);

	Map<VectorXd> eigen_l(m_bound.get_column_vector(3), m_bound.num_rows);
	Map<VectorXd> eigen_h(m_bound.get_column_vector(4), m_bound.num_rows);

	index_t num_rows = m_bound.num_rows;
	index_t num_cols = m_mu.vlen;

	m_pl = SGMatrix<float64_t>(num_rows, num_cols);
	m_ph = SGMatrix<float64_t>(num_rows, num_cols);
	m_cdf_diff = SGMatrix<float64_t>(num_rows, num_cols);
	m_l2_plus_s2 = SGMatrix<float64_t>(num_rows, num_cols);
	m_h2_plus_s2 = SGMatrix<float64_t>(num_rows, num_cols);
	m_weighted_pdf_diff = SGMatrix<float64_t>(num_rows, num_cols);

	Map<MatrixXd> eigen_pl(m_pl.matrix, num_rows, num_cols);
	Map<MatrixXd> eigen_ph(m_ph.matrix, num_rows, num_cols);
	Map<MatrixXd> eigen_cdf_diff(m_cdf_diff.matrix, num_rows, num_cols);
	Map<MatrixXd> eigen_l2_plus_s2(m_l2_plus_s2.matrix, num_rows, num_cols);
	Map<MatrixXd> eigen_h2_plus_s2(m_h2_plus_s2.matrix, num_rows, num_cols);
	Map<MatrixXd> eigen_weighted_pdf_diff(m_weighted_pdf_diff.matrix, num_rows, num_cols);

	SGMatrix<float64_t> zl(num_rows, num_cols);
	Map<MatrixXd> eigen_zl(zl.matrix, num_rows, num_cols);
	SGMatrix<float64_t> zh(num_rows, num_cols);
	Map<MatrixXd> eigen_zh(zh.matrix, num_rows, num_cols);

	//bsxfun(@minus,l,m)
	eigen_zl = ((-eigen_mu).replicate(1,eigen_l.rows()).array().transpose().colwise() + eigen_l.array()).matrix();
	//bsxfun(@minus,h,m)
	eigen_zh = ((-eigen_mu).replicate(1,eigen_h.rows()).array().transpose().colwise() + eigen_h.array()).matrix();

	VectorXd eigen_s_inv = eigen_s2.array().sqrt().inverse().matrix();

	//zl = bsxfun(@times, bsxfun(@minus,l,m), 1./sqrt(v))
	eigen_zl = (eigen_zl.array().rowwise()*eigen_s_inv.array().transpose()).matrix();
	//zh = bsxfun(@times, bsxfun(@minus,h,m), 1./sqrt(v))
	eigen_zh = (eigen_zh.array().rowwise()*eigen_s_inv.array().transpose()).matrix();

	//Usually we use the pdf in the log-domain and the log_sum_exp trick
	//to avoid numerical underflow, in particular for IID samples
	for (index_t r = 0; r < zl.num_rows; r++)
	{
		for (index_t c = 0; c < zl.num_cols; c++)
		{
			if (CMath::abs(zl(r, c)) == CMath::INFTY)
				m_pl(r, c) = 0;
			else
				m_pl(r, c) = CMath::exp(CStatistics::univariate_log_pdf(zl(r, c)));

			if (CMath::abs(zh(r, c)) == CMath::INFTY)
				m_ph(r, c) = 0;
			else
				m_ph(r, c) = CMath::exp(CStatistics::univariate_log_pdf(zh(r, c)));
		}
	}

	//pl = bsxfun(@times, normpdf(zl), 1./sqrt(v));
	eigen_pl = (eigen_pl.array().rowwise()*eigen_s_inv.array().transpose()).matrix();
	//ph = bsxfun(@times, normpdf(zh), 1./sqrt(v));
	eigen_ph = (eigen_ph.array().rowwise()*eigen_s_inv.array().transpose()).matrix();

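	// After the rescaling above, pl(r,c) = N(l_r | mu_c, s2_c) and
	// ph(r,c) = N(h_r | mu_c, s2_c): the standard normal pdf at the
	// standardized edge, divided by sqrt(s2), is the Gaussian pdf at the edge.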
	SGMatrix<float64_t>& cl = zl;
	SGMatrix<float64_t>& ch = zh;

	for (index_t r = 0; r < zl.num_rows; r++)
	{
		for (index_t c = 0; c < zl.num_cols; c++)
		{
			//cl = 0.5*erf(zl/sqrt(2)); %normal cdf - const
			cl(r, c) = CStatistics::normal_cdf(zl(r, c)) - 0.5;
			//ch = 0.5*erf(zh/sqrt(2)); %normal cdf - const
			ch(r, c) = CStatistics::normal_cdf(zh(r, c)) - 0.5;
		}
	}

	Map<MatrixXd> eigen_cl(cl.matrix, num_rows, num_cols);
	Map<MatrixXd> eigen_ch(ch.matrix, num_rows, num_cols);

	eigen_cdf_diff = eigen_ch - eigen_cl;
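	// cdf_diff(r,c) = Phi((h_r - mu_c)/sqrt(s2_c)) - Phi((l_r - mu_c)/sqrt(s2_c)),
	// i.e. the probability mass q assigns to the r-th piece; the constant 0.5
	// subtracted from each cdf above cancels in the difference.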

	float64_t l_bak = eigen_l(0);
	eigen_l(0) = 0;

	float64_t h_bak = eigen_h(eigen_h.size()-1);
	eigen_h(eigen_h.size()-1) = 0;

	//bsxfun(@plus, l.^2, v)
	eigen_l2_plus_s2 = (eigen_s2.replicate(1,eigen_l.rows()).array().transpose().colwise() + eigen_l.array().pow(2)).matrix();
	//bsxfun(@plus, h.^2, v)
	eigen_h2_plus_s2 = (eigen_s2.replicate(1,eigen_h.rows()).array().transpose().colwise() + eigen_h.array().pow(2)).matrix();
	//pl.*l - ph.*h
	eigen_weighted_pdf_diff = (eigen_pl.array().colwise()*eigen_l.array() - eigen_ph.array().colwise()*eigen_h.array()).matrix();

	eigen_l(0) = l_bak;
	eigen_h(eigen_h.size()-1) = h_bak;
}

SGVector<float64_t> CLogitVGPiecewiseBoundLikelihood::get_first_derivative_wrt_hyperparameter(
	const TParameter* param) const
{
	return SGVector<float64_t>();
}

} /* namespace shogun */