MatrixOperations.cpp
/*
 * Copyright (c) The Shogun Machine Learning Toolbox
 * Written (w) 2014 Wu Lin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those
 * of the authors and should not be interpreted as representing official policies,
 * either expressed or implied, of the Shogun Development Team.
 *
 * Code adapted from
 * Gaussian Process Machine Learning Toolbox
 * http://www.gaussianprocess.org/gpml/code/matlab/doc/
 */

#include <shogun/machine/gp/MatrixOperations.h>

#ifdef HAVE_EIGEN3
#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/mathematics/eigen3.h>

using namespace Eigen;

namespace shogun
{

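/* Returns the factor used in the GPML-style computations below: if all
 * entries of W are non-negative, the upper triangular Cholesky factor of
 * sW*sW' .* (scale^2*K) + I; if W has negative entries,
 * -(scale^2*K + diag(1/W))^(-1) is returned instead.
 */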
SGMatrix<float64_t> CMatrixOperations::get_choleksy(SGVector<float64_t> W,
	SGVector<float64_t> sW, SGMatrix<float64_t> kernel, float64_t scale)
{
	Map<VectorXd> eigen_W(W.vector, W.vlen);
	Map<VectorXd> eigen_sW(sW.vector, sW.vlen);
	Map<MatrixXd> eigen_kernel(kernel.matrix, kernel.num_rows, kernel.num_cols);

	REQUIRE(eigen_W.rows()==eigen_sW.rows(),
		"The length of W (%d) and sW (%d) should be the same\n",
		eigen_W.rows(), eigen_sW.rows());
	REQUIRE(eigen_kernel.rows()==eigen_kernel.cols(),
		"Kernel should be a square matrix, row (%d) col (%d)\n",
		eigen_kernel.rows(), eigen_kernel.cols());
	REQUIRE(eigen_kernel.rows()==eigen_W.rows(),
		"The dimensions of kernel (%d) and W (%d) should match\n",
		eigen_kernel.rows(), eigen_W.rows());

	SGMatrix<float64_t> L(eigen_W.rows(), eigen_W.rows());

	Map<MatrixXd> eigen_L(L.matrix, L.num_rows, L.num_cols);

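	// W may contain negative entries (e.g. for non-log-concave likelihoods),
	// in which case sW*sW' .* K + I need not be positive definite and a
	// Cholesky factorisation could fail; fall back to an explicit inverse
	// computed via a full-pivoting LU decomposition.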
	if (eigen_W.minCoeff()<0)
	{
		// compute inverse of diagonal noise: iW = 1/W
		VectorXd eigen_iW=(VectorXd::Ones(eigen_W.rows())).cwiseQuotient(eigen_W);

		FullPivLU<MatrixXd> lu(
			eigen_kernel*CMath::sq(scale)+MatrixXd(eigen_iW.asDiagonal()));

		// compute cholesky: L = -(K + iW)^-1
		eigen_L=-lu.inverse();
	}
	else
	{
		// compute cholesky: L = chol(sW * sW' .* K + I)
		LLT<MatrixXd> llt(
			(eigen_sW*eigen_sW.transpose()).cwiseProduct(eigen_kernel*CMath::sq(scale))+
			MatrixXd::Identity(eigen_kernel.rows(), eigen_kernel.cols()));

		eigen_L=llt.matrixU();
	}

	return L;
}

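/* Computes V = L^(-1) * W^(1/2) * (scale^2*K) using the upper triangular
 * factor stored in L, then delegates to the overload below, which returns
 * the posterior covariance scale^2*K - V'*V.
 */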
SGMatrix<float64_t> CMatrixOperations::get_inverse(SGMatrix<float64_t> L, SGMatrix<float64_t> kernel,
	SGVector<float64_t> sW, float64_t scale)
{
	Map<MatrixXd> eigen_L(L.matrix, L.num_rows, L.num_cols);
	Map<MatrixXd> eigen_kernel(kernel.matrix, kernel.num_rows, kernel.num_cols);
	Map<VectorXd> eigen_sW(sW.vector, sW.vlen);

	SGMatrix<float64_t> V(kernel.num_rows, kernel.num_cols);
	Map<MatrixXd> eigen_V(V.matrix, V.num_rows, V.num_cols);

	// compute V = L^(-1) * W^(1/2) * K, using the upper triangular factor L^T
	eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
		eigen_sW.asDiagonal()*eigen_kernel*CMath::sq(scale));

	return get_inverse(L, kernel, sW, V, scale);
}

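/* Given the Cholesky factor L, sW = W^(1/2) and V = L^(-1) * W^(1/2) * K,
 * returns the posterior covariance Sigma = scale^2*K - V'*V (see the
 * derivation in the comment below).
 */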
SGMatrix<float64_t> CMatrixOperations::get_inverse(SGMatrix<float64_t> L,
	SGMatrix<float64_t> kernel, SGVector<float64_t> sW, SGMatrix<float64_t> V,
	float64_t scale)
{
	Map<MatrixXd> eigen_L(L.matrix, L.num_rows, L.num_cols);
	Map<MatrixXd> eigen_kernel(kernel.matrix, kernel.num_rows, kernel.num_cols);
	Map<VectorXd> eigen_sW(sW.vector, sW.vlen);
	Map<MatrixXd> eigen_V(V.matrix, V.num_rows, V.num_cols);

	REQUIRE(eigen_kernel.rows()==eigen_kernel.cols(),
		"Kernel should be a square matrix, row (%d) col (%d)\n",
		eigen_kernel.rows(), eigen_kernel.cols());
	REQUIRE(eigen_L.rows()==eigen_L.cols(),
		"L should be a square matrix, row (%d) col (%d)\n",
		eigen_L.rows(), eigen_L.cols());
	REQUIRE(eigen_kernel.rows()==eigen_sW.rows(),
		"The dimensions of kernel (%d) and sW (%d) should match\n",
		eigen_kernel.rows(), eigen_sW.rows());
	REQUIRE(eigen_L.rows()==eigen_sW.rows(),
		"The dimensions of L (%d) and sW (%d) should match\n",
		eigen_L.rows(), eigen_sW.rows());

	SGMatrix<float64_t> tmp(eigen_L.rows(), eigen_L.cols());
	Map<MatrixXd> eigen_tmp(tmp.matrix, tmp.num_rows, tmp.num_cols);

	// compute covariance matrix of the posterior:
	// Sigma = K - K * W^(1/2) * (L * L^T)^(-1) * W^(1/2) * K =
	// K - (K * W^(1/2)) * (L^T)^(-1) * L^(-1) * W^(1/2) * K =
	// K - (W^(1/2) * K)^T * (L^(-1))^T * L^(-1) * W^(1/2) * K = K - V^T * V
	eigen_tmp=eigen_kernel*CMath::sq(scale)-eigen_V.adjoint()*eigen_V;
	return tmp;
}

} /* namespace shogun */
#endif /* HAVE_EIGEN3 */
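
For orientation, here is a minimal, illustrative sketch of how these helpers could be called from user code. It assumes a Shogun build with HAVE_EIGEN3, that get_choleksy and get_inverse are static members of CMatrixOperations with the signatures shown in this listing, and it uses a made-up toy kernel matrix, W values and unit scale; it is a sketch, not part of the Shogun sources.

#include <shogun/base/init.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/lib/SGVector.h>
#include <shogun/mathematics/Math.h>
#include <shogun/machine/gp/MatrixOperations.h>

using namespace shogun;

int main()
{
	init_shogun_with_defaults();

	// toy 2x2 kernel matrix
	SGMatrix<float64_t> kernel(2, 2);
	kernel(0,0)=1.0; kernel(0,1)=0.5;
	kernel(1,0)=0.5; kernel(1,1)=1.0;

	// positive W, so get_choleksy takes the Cholesky (LLT) branch
	SGVector<float64_t> W(2);
	W[0]=2.0;
	W[1]=3.0;

	// sW = W^(1/2)
	SGVector<float64_t> sW(2);
	for (index_t i=0; i<W.vlen; i++)
		sW[i]=CMath::sqrt(W[i]);

	float64_t scale=1.0;

	// upper triangular factor of sW*sW' .* kernel + I
	SGMatrix<float64_t> L=CMatrixOperations::get_choleksy(W, sW, kernel, scale);

	// posterior covariance Sigma = kernel - V'*V
	SGMatrix<float64_t> Sigma=CMatrixOperations::get_inverse(L, kernel, sW, scale);
	Sigma.display_matrix("Sigma");

	exit_shogun();
	return 0;
}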