LDA.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 1999-2009 Soeren Sonnenburg
 * Written (W) 2014 Abhijeet Kislay
 * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
 */
#include <shogun/lib/config.h>

#ifdef HAVE_EIGEN3
#include <shogun/lib/common.h>
#include <shogun/machine/Machine.h>
#include <shogun/machine/LinearMachine.h>
#include <shogun/classifier/LDA.h>
#include <shogun/labels/Labels.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/eigen3.h>

using namespace Eigen;
using namespace shogun;

CLDA::CLDA(float64_t gamma, ELDAMethod method)
	: CLinearMachine()
{
	init();
	m_method=method;
	m_gamma=gamma;
}

CLDA::CLDA(float64_t gamma, CDotFeatures *traindat,
	CLabels *trainlab, ELDAMethod method)
	: CLinearMachine(), m_gamma(gamma)
{
	init();
	set_features(traindat);
	set_labels(trainlab);
	m_method=method;
	m_gamma=gamma;
}

void CLDA::init()
{
	m_method=AUTO_LDA;
	m_gamma=0;
	SG_ADD((machine_int_t*) &m_method, "m_method",
		"Method used for LDA calculation", MS_NOT_AVAILABLE);
	SG_ADD((machine_int_t*) &m_gamma, "m_gamma",
		"Regularization parameter", MS_NOT_AVAILABLE);
}

CLDA::~CLDA()
{
}

bool CLDA::train_machine(CFeatures *data)
{
	REQUIRE(m_labels, "Labels for the given features are not specified!\n")
	REQUIRE(m_labels->get_label_type()==LT_BINARY, "The labels should be of type"
			" CBinaryLabels! You provided %s.\n", m_labels->get_name())

	if(data)
	{
		if(!data->has_property(FP_DOT))
			SG_ERROR("Specified features are not of type CDotFeatures\n")
		set_features((CDotFeatures*) data);
	}

	REQUIRE(features, "Features are not provided!\n")
	SGVector<int32_t> train_labels=((CBinaryLabels*) m_labels)->get_int_labels();
	REQUIRE(train_labels.vector, "Provided labels are empty!\n")

	SGMatrix<float64_t> feature_matrix=((CDenseFeatures<float64_t>*) features)
		->get_feature_matrix();
	int32_t num_feat=feature_matrix.num_rows;
	int32_t num_vec=feature_matrix.num_cols;
	REQUIRE(num_vec==train_labels.vlen, "Number of training examples (%d) should "
		"equal the number of labels specified (%d)!\n", num_vec, train_labels.vlen);

	SGVector<int32_t> classidx_neg(num_vec);
	SGVector<int32_t> classidx_pos(num_vec);

	int32_t i=0;
	int32_t num_neg=0;
	int32_t num_pos=0;

	// split the training examples into the two classes
	for(i=0; i<train_labels.vlen; i++)
	{
		if(train_labels.vector[i]==-1)
			classidx_neg[num_neg++]=i;
		else if(train_labels.vector[i]==+1)
			classidx_pos[num_pos++]=i;
	}

	w=SGVector<float64_t>(num_feat);
	w.zero();
	MatrixXd fmatrix=Map<MatrixXd>(feature_matrix.matrix, num_feat, num_vec);
	VectorXd mean_neg=VectorXd::Zero(num_feat);
	VectorXd mean_pos=VectorXd::Zero(num_feat);

	// mean of the negative class
	for(i=0; i<num_neg; i++)
		mean_neg+=fmatrix.col(classidx_neg[i]);
	mean_neg/=(float64_t)num_neg;

	// center the negative examples: x -> x-mean_neg
	for(i=0; i<num_neg; i++)
		fmatrix.col(classidx_neg[i])-=mean_neg;

	// mean of the positive class
	for(i=0; i<num_pos; i++)
		mean_pos+=fmatrix.col(classidx_pos[i]);
	mean_pos/=(float64_t)num_pos;

	// center the positive examples: x -> x-mean_pos
	for(i=0; i<num_pos; i++)
		fmatrix.col(classidx_pos[i])-=mean_pos;

	SGMatrix<float64_t> scatter_matrix(num_feat, num_feat);
	Map<MatrixXd> scatter(scatter_matrix.matrix, num_feat, num_feat);

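	// AUTO_LDA picks the classical FLD solve when there are more examples
	// than features (the scatter matrix is then typically invertible) and
	// falls back to the SVD-based route otherwise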
	if(m_method==FLD_LDA || (m_method==AUTO_LDA && num_vec>num_feat))
	{
		// covariance matrix
		MatrixXd cov_mat(num_feat, num_feat);
		cov_mat=fmatrix*fmatrix.transpose();
		scatter=cov_mat/(num_vec-1);
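
		// shrinkage regularization:
		// scatter <- (1-gamma)*scatter + gamma*(trace(scatter)/num_feat)*I,
		// which keeps the Cholesky solve below well conditioned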
		float64_t trace=scatter.trace();
		double s=1.0-m_gamma;
		scatter*=s;
		scatter.diagonal()+=VectorXd::Constant(num_feat, trace*m_gamma/num_feat);

		// solve the linear system A*x=b for x, where A=scatter,
		// b=mean_pos-mean_neg and x=w; instead of crudely inverting A,
		// use a decomposition-based solve()
		Map<VectorXd> x(w.vector, num_feat);
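		// LLT is Eigen's Cholesky factorization; it applies because the
		// regularized scatter matrix is symmetric positive definite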
		LLT<MatrixXd> decomposition(scatter);
		x=decomposition.solve(mean_pos-mean_neg);

		// get the weights w_neg (for the -ve class) and w_pos (for the +ve class)
		VectorXd w_neg=decomposition.solve(mean_neg);
		VectorXd w_pos=decomposition.solve(mean_pos);

		// get the bias
		bias=0.5*(w_neg.dot(mean_neg)-w_pos.dot(mean_pos));
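		// this places the decision threshold halfway between the projected
		// class means: by symmetry of inv(scatter) the cross terms cancel,
		// so bias equals -0.5*w.dot(mean_pos+mean_neg)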
	}
	else
	{
		// for algorithmic details, please refer to section 16.3.1 of
		// "Bayesian Reasoning and Machine Learning" by David Barber

		// we will perform an SVD here
		MatrixXd fmatrix1=Map<MatrixXd>(feature_matrix.matrix, num_feat, num_vec);
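		// note: fmatrix1 re-maps the raw feature matrix and is therefore
		// not centered; the centering above modified fmatrix, a copy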

		// to hold the centered positive and negative class data
		MatrixXd cen_pos(num_feat, num_pos);
		MatrixXd cen_neg(num_feat, num_neg);

		for(i=0; i<num_pos; i++)
			cen_pos.col(i)=fmatrix.col(classidx_pos[i]);

		for(i=0; i<num_neg; i++)
			cen_neg.col(i)=fmatrix.col(classidx_neg[i]);

		// +ve class covariance matrix
		cen_pos=cen_pos*cen_pos.transpose()/(float64_t(num_pos-1));

		// -ve class covariance matrix
		cen_neg=cen_neg*cen_neg.transpose()/(float64_t(num_neg-1));

		// within-class scatter matrix
		MatrixXd Sw=num_pos*cen_pos+num_neg*cen_neg;
		float64_t trace=Sw.trace();
		double s=1.0-m_gamma;
		Sw*=s;
		Sw.diagonal()+=VectorXd::Constant(num_feat, trace*m_gamma/num_feat);
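		// same shrinkage regularization as in the FLD branch, applied here
		// to the within-class scatter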

		// total mean
		VectorXd mean_total=(num_pos*mean_pos+num_neg*mean_neg)/(float64_t)num_vec;

		// between-class matrix, stored as a (num_feat x 2) factor:
		// Sb*Sb.transpose() is the rank-two between-class scatter
		MatrixXd Sb(num_feat, 2);
		Sb.col(0)=sqrt(num_pos)*(mean_pos-mean_total);
		Sb.col(1)=sqrt(num_neg)*(mean_neg-mean_total);

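		// thin SVD of the data: the columns of U form an orthonormal basis
		// for the subspace the samples span (dimension at most num_vec),
		// which keeps the problem tractable when num_feat>num_vec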
		JacobiSVD<MatrixXd> svd(fmatrix1, ComputeThinU);

		// basis to represent the solution
		MatrixXd Q=svd.matrixU();
		// modified between-class scatter
		Sb=Q.transpose()*(Sb*(Sb.transpose()))*Q;

		// modified within-class scatter
		Sw=Q.transpose()*Sw*Q;

		// we want the SVD of inv(Chol(Sw))'*Sb*inv(Chol(Sw)):
		// 1. get Cw=Chol(Sw) and factorize Cw', so that solve() applies
		//    inv(Cw') without forming an explicit inverse
		HouseholderQR<MatrixXd> decomposition(Sw.llt().matrixU().transpose());

		// 2. get P=inv(Cw')*Sb via decomposition.solve(Sb)
		// 3. the final input to the SVD is therefore (inv(Cw')*P')'
		JacobiSVD<MatrixXd> svd2(decomposition.solve((decomposition.solve(Sb))
			.transpose()).transpose(), ComputeThinU);
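		// svd2 factorizes the within-class-whitened between-class scatter;
		// its leading left singular vector is the most discriminative
		// direction in the whitened basis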

		// since this is a linear classifier with only two classes,
		// we keep only the first singular vector
		Map<VectorXd> x(w.vector, num_feat);
		x=Q*(svd2.matrixU().col(0));

		// get the bias: put the decision boundary through the total mean
		bias=-x.dot(mean_total);
	}
	return true;
}
#endif // HAVE_EIGEN3
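
/* A minimal usage sketch (illustrative only, not part of this file): assumes
 * `feats` is an existing CDenseFeatures<float64_t>* and `labs` an existing
 * CBinaryLabels*.
 *
 *   CLDA* lda=new CLDA(0.1, AUTO_LDA); // gamma=0.1 adds mild shrinkage
 *   lda->set_features(feats);
 *   lda->set_labels(labs);
 *   lda->train();
 *   CBinaryLabels* out=lda->apply_binary(feats);
 *   SG_UNREF(out);
 *   SG_UNREF(lda);
 */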