SHOGUN 4.1.0
GaussianProcessMachine.cpp
/*
 * Copyright (c) The Shogun Machine Learning Toolbox
 * Written (w) 2014 Wu Lin
 * Written (W) 2013 Roman Votyakov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those
 * of the authors and should not be interpreted as representing official policies,
 * either expressed or implied, of the Shogun Development Team.
 *
 * Code adapted from
 * Gaussian Process Machine Learning Toolbox
 * http://www.gaussianprocess.org/gpml/code/matlab/doc/
 * and
 * https://gist.github.com/yorkerlin/8a36e8f9b298aa0246a4
 */

#include <shogun/lib/config.h>

#ifdef HAVE_EIGEN3

#include <shogun/machine/GaussianProcessMachine.h>
#include <shogun/mathematics/Math.h>
#include <shogun/kernel/Kernel.h>
#include <shogun/machine/gp/SingleSparseInferenceBase.h>

#include <shogun/mathematics/eigen3.h>

using namespace shogun;
using namespace Eigen;

CGaussianProcessMachine::CGaussianProcessMachine()
{
    init();
}

CGaussianProcessMachine::CGaussianProcessMachine(CInferenceMethod* method)
{
    init();
    set_inference_method(method);
}

void CGaussianProcessMachine::init()
{
    m_method=NULL;

    SG_ADD((CSGObject**) &m_method, "inference_method", "Inference method",
        MS_AVAILABLE);
}

CGaussianProcessMachine::~CGaussianProcessMachine()
{
    SG_UNREF(m_method);
}

SGVector<float64_t> CGaussianProcessMachine::get_posterior_means(CFeatures* data)
{
    REQUIRE(m_method, "Inference method should not be NULL\n")

    CFeatures* feat;

    CSingleSparseInferenceBase* sparse_method=
        dynamic_cast<CSingleSparseInferenceBase *>(m_method);
    // use inducing features for sparse inference method
    if (sparse_method)
    {
        sparse_method->optimize_inducing_features();
        feat=sparse_method->get_inducing_features();
    }
    else
        feat=m_method->get_features();

    // get kernel and compute kernel matrix: K(feat, data)*scale^2
    CKernel* training_kernel=m_method->get_kernel();
    CKernel* kernel=CKernel::obtain_from_generic(training_kernel->clone());
    SG_UNREF(training_kernel);

    kernel->init(feat, data);

    // get kernel matrix and create eigen representation of it
    SGMatrix<float64_t> k_trts=kernel->get_kernel_matrix();
    Map<MatrixXd> eigen_Ks(k_trts.matrix, k_trts.num_rows, k_trts.num_cols);

    // compute Ks=Ks*scale^2
    eigen_Ks*=CMath::sq(m_method->get_scale());

    // cleanup
    SG_UNREF(feat);
    SG_UNREF(kernel);

    // get alpha and create eigen representation of it
    SGVector<float64_t> alpha=m_method->get_alpha();
    Map<VectorXd> eigen_alpha(alpha.vector, alpha.vlen);

    // get mean and create eigen representation of it
    CMeanFunction* mean_function=m_method->get_mean();
    SGVector<float64_t> mean=mean_function->get_mean_vector(data);
    Map<VectorXd> eigen_mean(mean.vector, mean.vlen);
    SG_UNREF(mean_function);

    const index_t C=alpha.vlen/k_trts.num_rows;
    const index_t n=k_trts.num_rows;
    const index_t m=k_trts.num_cols;

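    // alpha is stored as C stacked blocks of length n, one per latent function
    // (C==1 for regression and binary classification). For each block the mean
    // of the m test points is Ks'*alpha_c + m(data); the results are written
    // into a C x m matrix, so mu[bl+idx*C] holds the mean of latent function bl
    // at test point idx.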
    // compute mean: mu=Ks'*alpha+m
    SGVector<float64_t> mu(C*m);
    Map<MatrixXd> eigen_mu_matrix(mu.vector,C,m);

    for(index_t bl=0; bl<C; bl++)
        eigen_mu_matrix.block(bl,0,1,m)=(eigen_Ks.adjoint()*
            eigen_alpha.block(bl*n,0,n,1)+eigen_mean).transpose();

    return mu;
}

SGVector<float64_t> CGaussianProcessMachine::get_posterior_variances(
    CFeatures* data)
{
    REQUIRE(m_method, "Inference method should not be NULL\n")

    CFeatures* feat;

    bool is_sparse=false;
    CSingleSparseInferenceBase* sparse_method=
        dynamic_cast<CSingleSparseInferenceBase *>(m_method);
    // use inducing features for sparse inference method
    if (sparse_method)
    {
        sparse_method->optimize_inducing_features();
        feat=sparse_method->get_inducing_features();
        is_sparse=true;
    }
    else
        feat=m_method->get_features();

    SG_REF(data);

    // get kernel and compute kernel matrix: K(data, data)*scale^2
    CKernel* training_kernel=m_method->get_kernel();
    CKernel* kernel=CKernel::obtain_from_generic(training_kernel->clone());
    SG_UNREF(training_kernel);
    kernel->init(data, data);

    // get kernel diagonal and create eigen representation of it
    SGVector<float64_t> k_tsts=kernel->get_kernel_diagonal();
    Map<VectorXd> eigen_Kss_diag(k_tsts.vector, k_tsts.vlen);

    // compute Kss=Kss*scale^2
    eigen_Kss_diag*=CMath::sq(m_method->get_scale());

    // compute kernel matrix: K(feat, data)*scale^2
    kernel->init(feat, data);

    // get kernel matrix and create eigen representation of it
    SGMatrix<float64_t> k_trts=kernel->get_kernel_matrix();
    Map<MatrixXd> eigen_Ks(k_trts.matrix, k_trts.num_rows, k_trts.num_cols);

    // compute Ks=Ks*scale^2
    eigen_Ks*=CMath::sq(m_method->get_scale());

    // cleanup
    SG_UNREF(kernel);
    SG_UNREF(feat);
    SG_UNREF(data);

    // get shogun representation of cholesky and create eigen representation
    SGMatrix<float64_t> L=m_method->get_cholesky();
    Map<MatrixXd> eigen_L(L.matrix, L.num_rows, L.num_cols);

    SGVector<float64_t> alpha=m_method->get_alpha();
    const index_t n=k_trts.num_rows;
    const index_t m=k_tsts.vlen;
    const index_t C=alpha.vlen/n;
    // result variance vector
    SGVector<float64_t> s2(m*C*C);
    Map<VectorXd> eigen_s2(s2.vector, s2.vlen);

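    // s2 holds a C x C block of posterior (co)variances for every test point,
    // stored as s2[bl_j+(bl_i+idx*C)*C]; for C==1 this reduces to one variance
    // per test point. The branches below cover an upper-triangular cholesky
    // factor with a single latent function, the multiclass case (GPML
    // algorithm 3.4), and a non-triangular L, which is used directly as a full
    // matrix.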
    if (eigen_L.isUpperTriangular() && !is_sparse)
    {
        if (alpha.vlen==L.num_rows)
        {
            //binary case
            // get shogun representation of diagonal sigma vector and create
            // eigen representation
            SGVector<float64_t> sW=m_method->get_diagonal_vector();
            Map<VectorXd> eigen_sW(sW.vector, sW.vlen);
            // solve L' * V = sW * Ks and compute V.^2
            MatrixXd eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
                eigen_sW.asDiagonal()*eigen_Ks);
            MatrixXd eigen_sV=eigen_V.cwiseProduct(eigen_V);

            eigen_s2=eigen_Kss_diag-eigen_sV.colwise().sum().adjoint();
        }
        else
        {
            if (m_method->supports_multiclass())
            {
                //multiclass case
                //see the reference code of the gist link, which is based on
                //algorithm 3.4 of the GPML textbook
                Map<MatrixXd> &eigen_M=eigen_L;
                eigen_s2.fill(0);

                SGMatrix<float64_t> E=m_method->get_multiclass_E();
                Map<MatrixXd> eigen_E(E.matrix, E.num_rows, E.num_cols);
                ASSERT(E.num_cols==alpha.vlen);
                for(index_t bl_i=0; bl_i<C; bl_i++)
                {
                    //n by m
                    MatrixXd bi=eigen_E.block(0,bl_i*n,n,n)*eigen_Ks;
                    MatrixXd c_cav=eigen_M.triangularView<Upper>().adjoint().solve(bi);
                    c_cav=eigen_M.triangularView<Upper>().solve(c_cav);

                    for(index_t bl_j=0; bl_j<C; bl_j++)
                    {
                        MatrixXd bj=eigen_E.block(0,bl_j*n,n,n)*eigen_Ks;
                        for (index_t idx_m=0; idx_m<m; idx_m++)
                            eigen_s2[bl_j+(bl_i+idx_m*C)*C]=
                                (bj.block(0,idx_m,n,1).array()*
                                 c_cav.block(0,idx_m,n,1).array()).sum();
                    }
                    for (index_t idx_m=0; idx_m<m; idx_m++)
                        eigen_s2[bl_i+(bl_i+idx_m*C)*C]+=eigen_Kss_diag(idx_m)-
                            (eigen_Ks.block(0,idx_m,n,1).array()*
                             bi.block(0,idx_m,n,1).array()).sum();
                }
            }
            else
            {
                SG_ERROR("Unsupported inference method!\n");
                return s2;
            }
        }
    }
    else
    {
        // M = Ks .* (L * Ks)
        MatrixXd eigen_M=eigen_Ks.cwiseProduct(eigen_L*eigen_Ks);
        eigen_s2=eigen_Kss_diag+eigen_M.colwise().sum().adjoint();
    }

    return s2;
}

#endif /* HAVE_EIGEN3 */
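
The listing above only provides the shared posterior helpers; concrete machines such as CGaussianProcessRegression inherit them together with an inference method. The following is a minimal usage sketch, not part of GaussianProcessMachine.cpp: it assumes the usual Shogun 4.x API (CDenseFeatures, CRegressionLabels, CGaussianKernel, CZeroMean, CGaussianLikelihood, CExactInferenceMethod constructed from kernel, features, mean, labels and likelihood, and CGaussianProcessRegression constructed from the inference method), so header paths and constructor signatures should be checked against the installed version.

#include <shogun/base/init.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/RegressionLabels.h>
#include <shogun/kernel/GaussianKernel.h>
#include <shogun/machine/gp/ZeroMean.h>
#include <shogun/machine/gp/GaussianLikelihood.h>
#include <shogun/machine/gp/ExactInferenceMethod.h>
#include <shogun/regression/GaussianProcessRegression.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // toy 1-D training data (3 points) and 2 test points
    SGMatrix<float64_t> X(1, 3);
    X(0,0)=0.0; X(0,1)=1.0; X(0,2)=2.0;
    SGVector<float64_t> y(3);
    y[0]=0.0; y[1]=0.8; y[2]=0.9;
    SGMatrix<float64_t> X_test(1, 2);
    X_test(0,0)=0.5; X_test(0,1)=1.5;

    CDenseFeatures<float64_t>* feats=new CDenseFeatures<float64_t>(X);
    CRegressionLabels* labels=new CRegressionLabels(y);
    CDenseFeatures<float64_t>* feats_test=new CDenseFeatures<float64_t>(X_test);
    SG_REF(feats_test);

    // exact GP regression: Gaussian kernel, zero mean, Gaussian likelihood
    CGaussianKernel* kernel=new CGaussianKernel(10, 2.0);
    CZeroMean* mean=new CZeroMean();
    CGaussianLikelihood* lik=new CGaussianLikelihood();
    CExactInferenceMethod* inf=
        new CExactInferenceMethod(kernel, feats, mean, labels, lik);

    CGaussianProcessRegression* gpr=new CGaussianProcessRegression(inf);
    gpr->train();

    // the helpers defined in this file: posterior mean and variance per test point
    SGVector<float64_t> mu=gpr->get_posterior_means(feats_test);
    SGVector<float64_t> s2=gpr->get_posterior_variances(feats_test);
    mu.display_vector("posterior means");
    s2.display_vector("posterior variances");

    SG_UNREF(gpr);
    SG_UNREF(feats_test);
    exit_shogun();
    return 0;
}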