SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
KLLowerTriangularInferenceMethod.cpp
Go to the documentation of this file.
1  /*
2  * Copyright (c) The Shogun Machine Learning Toolbox
3  * Written (w) 2014 Wu Lin
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice, this
10  * list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * The views and conclusions contained in the software and documentation are those
27  * of the authors and should not be interpreted as representing official policies,
28  * either expressed or implied, of the Shogun Development Team.
29  *
30  * Code adapted from
31  * http://hannes.nickisch.org/code/approxXX.tar.gz
32  * and Gaussian Process Machine Learning Toolbox
33  * http://www.gaussianprocess.org/gpml/code/matlab/doc/
34  * and the reference paper is
35  * Challis, Edward, and David Barber.
36  * "Concave Gaussian variational approximations for inference in large-scale Bayesian linear models."
37  * International conference on Artificial Intelligence and Statistics. 2011.
38  *
39  * This code specifically adapted from function in approxKL.m and infKL.m
40  */
41 
43 
44 #ifdef HAVE_EIGEN3
48 
49 using namespace Eigen;
50 
51 namespace shogun
52 {
53 
/** Default constructor: delegates to the CKLInferenceMethod default
 * constructor and then registers this class' members with the parameter
 * framework via init().
 */
54 CKLLowerTriangularInferenceMethod::CKLLowerTriangularInferenceMethod() : CKLInferenceMethod()
55 {
56  init();
57 }
58 
// NOTE(review): the first line of this constructor's signature (original
// line 59, which names the CKernel* parameter `kern`) is missing from this
// extract of the file.
// Constructor taking the GP building blocks (kernel, features, mean
// function, labels, likelihood model); forwards them to the base class and
// registers this class' members via init().
 60  CFeatures* feat, CMeanFunction* m, CLabels* lab, CLikelihoodModel* mod)
 61  : CKLInferenceMethod(kern, feat, m, lab, mod)
 62 {
 63  init();
 64 }
65 
/** Registers this class' member variables with Shogun's parameter
 * framework (serialization / model selection).
 * NOTE(review): the trailing argument of each SG_ADD call (closing the
 * call, original lines 70, 73, 76, 80 and 83-84) is missing from this
 * extract of the file; the calls below are therefore shown truncated.
 */
66 void CKLLowerTriangularInferenceMethod::init()
67 {
 // K^{-1} * Sigma (see update formulas further down in this file)
 68  SG_ADD(&m_InvK_Sigma, "invk_Sigma",
 69  "K^{-1}Sigma'",
 // prior mean evaluated at the training inputs
 71  SG_ADD(&m_mean_vec, "mean_vec",
 72  "The mean vector generated from mean function",
 // cached log|K|, computed once per kernel update
 74  SG_ADD(&m_log_det_Kernel, "log_det_kernel",
 75  "The Log-determinant of Kernel",
 77 
 // factors of the LDLT decomposition of the scaled kernel matrix;
 // L*sqrt(D) is stored combined so later solves need no separate D step
 78  SG_ADD(&m_Kernel_LsD, "L_sqrt_D",
 79  "The L*sqrt(D) matrix, where L and D are defined in LDLT factorization on Kernel*sq(m_scale)",
 81  SG_ADD(&m_Kernel_P, "Permutation_P",
 82  "The permutation sequence of P, where P are defined in LDLT factorization on Kernel*sq(m_scale)",
 85 }
86 
// NOTE(review): the signature line (original line 87) is missing from this
// extract; judging by position this is the (empty) destructor — members are
// Shogun-managed containers, so nothing needs explicit release here.
88 {
89 }
90 
// NOTE(review): the signature (original line 91) and the statements on
// original lines 93-95 are missing from this extract. Presumably this is
// get_diagonal_vector(), unsupported for this inference method — the only
// visible behavior is returning an empty vector; verify against the
// original file.
92 {
 // empty result: no diagonal is produced by the visible code
 96  return SGVector<float64_t>();
 97 }
98 
// NOTE(review): this function's signature (original line 99) and entire
// body (original lines 101-103) are missing from this extract — nothing can
// be documented from the visible braces alone.
100 {
104 }
105 
// NOTE(review): the signature (original line 106) is missing from this
// extract; judging by the body this is the initialization update that
// factorizes the kernel matrix. Original lines 111, 116-117 and 122-123 are
// also missing (line 111 presumably allocates m_Kernel_LsD before it is
// zeroed below — confirm against the original file).
107 {
 // LDLT factorization of the (scaled) kernel, computed by the base class
 108  Eigen::LDLT<Eigen::MatrixXd> ldlt=update_init_helper();
 109  MatrixXd Kernel_D=ldlt.vectorD();
 110  MatrixXd Kernel_L=ldlt.matrixL();
 // store L*sqrt(D) combined, so later triangular solves absorb D implicitly
 112  m_Kernel_LsD.zero();
 113  Map<MatrixXd> eigen_Kernel_LsD(m_Kernel_LsD.matrix, m_Kernel_LsD.num_rows, m_Kernel_LsD.num_cols);
 114  eigen_Kernel_LsD.triangularView<Lower>()=Kernel_L*Kernel_D.array().sqrt().matrix().asDiagonal();
 // log|K| = 2*sum(log|diag(L*sqrt(D))|); the permutation has determinant ±1
 // and does not contribute
 115  m_log_det_Kernel=2.0*eigen_Kernel_LsD.diagonal().array().abs().log().sum();
 116 
 // record the LDLT pivoting as an index sequence: start from the identity
 // permutation and apply the factorization's transpositions to it
 118  for (index_t i=0; i<m_Kernel_P.vlen; i++)
 119  m_Kernel_P[i]=i;
 120  Map<VectorXi> eigen_Kernel_P(m_Kernel_P.vector, m_Kernel_P.vlen);
 121  eigen_Kernel_P=ldlt.transpositionsP()*eigen_Kernel_P;
 122 
 124 }
125 
// NOTE(review): the signature (original line 126) is missing from this
// extract; from the body this is solve_inverse(eigen_A) — it computes
// K^{-1}*A using the cached LDLT factors (K = P' * LsD * LsD' * P, where
// LsD = L*sqrt(D)). Original line 134 — which must declare `tmp`,
// presumably a working copy/clone of m_Kernel_P — is also missing; confirm
// against the original file.
127 {
 128  Map<VectorXi> eigen_Kernel_P(m_Kernel_P.vector, m_Kernel_P.vlen);
 129  Map<MatrixXd> eigen_Kernel_LsD(m_Kernel_LsD.matrix, m_Kernel_LsD.num_rows, m_Kernel_LsD.num_cols);
 130 
 131  //re-construct the Permutation Matrix
 132  PermutationMatrix<Dynamic> P(m_Kernel_P.vlen);
 133  P.setIdentity();
 // decode the stored index sequence into explicit transpositions: repeatedly
 // swap element i into place until tmp[i] <= i, mirroring each swap in P
 135  for (index_t i=0; i<tmp.vlen; i++)
 136  {
 137  while(tmp[i]>i)
 138  {
 139  P.applyTranspositionOnTheLeft(i,tmp[i]);
 140  index_t idx=tmp[i];
 141  tmp[i]=tmp[idx];
 142  tmp[idx]=idx;
 143  }
 144  }
 145  P=P.transpose();
 146  //(P'LDL'P)\eigen_A
 // two triangular solves with LsD (= L*sqrt(D)) replace the usual
 // L-solve / D-scale / L'-solve sequence, since D is folded into LsD
 147  MatrixXd tmp1=P*eigen_A;
 148  MatrixXd tmp2=eigen_Kernel_LsD.triangularView<Lower>().solve(tmp1);
 149  MatrixXd tmp3=eigen_Kernel_LsD.triangularView<Lower>().transpose().solve(tmp2);
 // undo the permutation to express the result in the original ordering
 150  return P.transpose()*tmp3;
 151 }
152 
// NOTE(review): the signature (original line 153) is missing from this
// extract; from the body this computes the derivative of the negative log
// marginal likelihood with respect to a kernel hyperparameter, given the
// kernel-derivative matrix mapped as eigen_dK. The Map declaration for
// eigen_dK (before line 155 region) is also missing — confirm against the
// original file.
154 {
 155  Map<VectorXd> eigen_alpha(m_alpha.vector, m_mu.vlen);
 156  Map<VectorXd> eigen_mu(m_mu.vector, m_mu.vlen);
 157  Map<MatrixXd> eigen_InvK_Sigma(m_InvK_Sigma.matrix, m_InvK_Sigma.num_rows, m_InvK_Sigma.num_cols);
 158 
 // GPML-style formula (MATLAB notation kept from the adapted code):
 159  //dnlZ(j)=0.5*sum(sum(dK.*(K\((eye(n)- (invK_V+alpha*m'))')),2),1);
 // tmp1 = K^{-1}*Sigma + alpha*mu'
 160  MatrixXd tmp1=eigen_InvK_Sigma+eigen_alpha*(eigen_mu.transpose());
 // tmp2 = (I - tmp1)'
 161  MatrixXd tmp2=(MatrixXd::Identity(m_ktrtr.num_rows,m_ktrtr.num_cols)-tmp1).transpose();
 // tmp3 = K \ tmp2, via the cached LDLT factors
 162  MatrixXd tmp3=solve_inverse(tmp2);
 // 0.5 * sum(sum(dK .* tmp3))
 163  return 0.5*(tmp3.array()*eigen_dK.array()).sum();
 164 }
165 
// NOTE(review): this function's signature (original line 166) and entire
// body (original lines 168-170) are missing from this extract — nothing can
// be documented from the visible braces alone.
167 {
 171 }
172 
// NOTE(review): the signature (original line 173) and original lines
// 176-178 are missing from this extract (those lines presumably compute
// m_InvK_Sigma and/or allocate m_L before it is mapped below — confirm
// against the original file). From the visible body this refreshes the
// variational covariance and then stores
//   m_L = K \ (K^{-1}*Sigma - I)'
// using the cached LDLT factors via solve_inverse().
174 {
 // recompute the variational covariance Sigma first
 175  update_Sigma();
 177 
 179  Map<MatrixXd> eigen_L(m_L.matrix, m_L.num_rows, m_L.num_cols);
 180  Map<MatrixXd> eigen_InvK_Sigma(m_InvK_Sigma.matrix, m_InvK_Sigma.num_rows, m_InvK_Sigma.num_cols);
 // (K^{-1}*Sigma - I)', the right-hand side of the solve below
 181  MatrixXd tmp2=(eigen_InvK_Sigma-MatrixXd::Identity(m_ktrtr.num_rows,m_ktrtr.num_cols)).transpose();
 182 
 183  eigen_L=solve_inverse(tmp2);
 184 }
185 
186 } /* namespace shogun */
187 
188 #endif /* HAVE_EIGEN3 */

SHOGUN Machine Learning Toolbox - Documentation