SHOGUN  4.2.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
LDA.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 2014 Abhijeet Kislay
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 #include <shogun/lib/config.h>
12 
13 #include <shogun/lib/common.h>
14 #include <shogun/machine/Machine.h>
16 #include <shogun/classifier/LDA.h>
17 #include <shogun/labels/Labels.h>
21 
22 using namespace Eigen;
23 using namespace shogun;
24 
// Default constructor: store the regularization strength (gamma) and the
// LDA solver strategy (method). init() is called first because it resets
// m_gamma (see init() below), so the explicit assignments must follow it.
// NOTE(review): listing line 26 is missing from this extraction (possibly a
// base-class initializer) -- confirm against the repository source.
25 CLDA::CLDA(float64_t gamma, ELDAMethod method)
27 {
28  init();
29  m_method=method;
30  m_gamma=gamma;
31 }
32 
// Convenience constructor: same as above, but also attaches the training
// features and labels up front via set_features()/set_labels().
// NOTE(review): the first line of this signature (listing line 33, taking
// gamma and a CDotFeatures* traindat) is missing from this extraction.
34  CLabels *trainlab, ELDAMethod method)
35  :CLinearMachine(), m_gamma(gamma)
36 {
37  init();
38  set_features(traindat);
39  set_labels(trainlab);
40  m_method=method;
41  m_gamma=gamma;
42 }
// (m_gamma is re-assigned above because init() zeroes it, clobbering the
// member-initializer value from the initializer list.)
43 
// Shared constructor setup: reset the regularization strength and register
// the tunable members with Shogun's parameter framework (SG_ADD) so they are
// serialized; MS_NOT_AVAILABLE excludes them from model selection.
// NOTE(review): listing line 46 is missing from this extraction (presumably
// the default assignment of m_method) -- confirm against the repository.
44 void CLDA::init()
45 {
47  m_gamma=0;
48  SG_ADD((machine_int_t*) &m_method, "m_method",
49  "Method used for LDA calculation", MS_NOT_AVAILABLE);
50  SG_ADD((machine_int_t*) &m_gamma, "m_gamma",
51  "Regularization parameter", MS_NOT_AVAILABLE);
52 }
53 
// Destructor: nothing to release here; features and labels are managed by
// the base classes. NOTE(review): the "CLDA::~CLDA()" signature line
// (listing line 54) is missing from this extraction.
55 {
56 }
57 
// Train the binary LDA classifier on the attached features/labels.
// Validates inputs, partitions examples by label (+1 / -1), centres each
// class in place, then solves for the weight vector w and bias by one of
// two routes chosen from m_method and the data shape:
//   * FLD_LDA, or AUTO_LDA with num_vec > num_feat: regularized pooled
//     covariance + Cholesky (LLT) solve.
//   * otherwise: an SVD-based route suited to num_feat >= num_vec
//     (Barber, "Bayesian Reasoning and Machine Learning", sec. 16.3.1).
// Returns true on success; precondition failures raise via REQUIRE/SG_ERROR.
// NOTE(review): the function signature line (listing line 58, presumably
// "bool CLDA::train_machine(CFeatures* data)") is missing from this
// extraction.
59 {
 // Preconditions: labels must be attached and binary.
 // NOTE(review): the message below has a typo ("should of type" ->
 // "should be of type"); it is a runtime string, so it is only flagged here.
60  REQUIRE(m_labels, "Labels for the given features are not specified!\n")
61  REQUIRE(m_labels->get_label_type()==LT_BINARY, "The labels should of type"
62  " CBinaryLabels! you provided %s \n",m_labels->get_name())
63 
 // Optionally replace the stored features with the ones passed in, after
 // checking they support dot products (required by CLinearMachine).
64  if(data)
65  {
66  if(!data->has_property(FP_DOT))
67  SG_ERROR("Specified features are not of type CDotFeatures\n")
68  set_features((CDotFeatures*) data);
69  }
70 
71  REQUIRE(features, "Features are not provided!\n")
72  SGVector<int32_t>train_labels=((CBinaryLabels *)m_labels)->get_int_labels();
73  REQUIRE(train_labels.vector,"Provided Labels are empty!\n")
74 
 // NOTE(review): listing line 75 is missing from this extraction -- it
 // presumably casts `features` to a dense float64 feature object whose
 // get_feature_matrix() is called on the next line; confirm upstream.
76  ->get_feature_matrix();
77  int32_t num_feat=feature_matrix.num_rows;
78  int32_t num_vec=feature_matrix.num_cols;
79  REQUIRE(num_vec==train_labels.vlen,"Number of training examples(%d) should be "
80  "equal to number of labels specified(%d)!\n", num_vec, train_labels.vlen);
81 
 // Index buffers for the two classes; each is sized num_vec so the first
 // num_neg / num_pos entries are the column indices of that class.
82  SGVector<int32_t> classidx_neg(num_vec);
83  SGVector<int32_t> classidx_pos(num_vec);
84 
85  int32_t i=0;
86  int32_t num_neg=0;
87  int32_t num_pos=0;
88 
 // Partition example indices by label. Labels other than -1/+1 are
 // silently skipped here (the binary-label REQUIRE above should prevent
 // them).
89  for(i=0; i<train_labels.vlen; i++)
90  {
91  if (train_labels.vector[i]==-1)
92  classidx_neg[num_neg++]=i;
93 
94  else if(train_labels.vector[i]==+1)
95  classidx_pos[num_pos++]=i;
96  }
97 
 // Fresh weight vector; fmatrix copies the feature matrix into Eigen so
 // the in-place centring below does not mutate the original features.
 // NOTE(review): num_neg and num_pos are used as divisors below -- this
 // assumes both classes are non-empty; confirm callers guarantee that.
98  w=SGVector<float64_t>(num_feat);
99  w.zero();
100  MatrixXd fmatrix=Map<MatrixXd>(feature_matrix.matrix, num_feat, num_vec);
101  VectorXd mean_neg(num_feat);
102  mean_neg=VectorXd::Zero(num_feat);
103  VectorXd mean_pos(num_feat);
104  mean_pos=VectorXd::Zero(num_feat);
105 
106  //mean neg
107  for(i=0; i<num_neg; i++)
108  mean_neg+=fmatrix.col(classidx_neg[i]);
109  mean_neg/=(float64_t)num_neg;
110 
111  // get m(-ve) - mean(-ve)
112  for(i=0; i<num_neg; i++)
113  fmatrix.col(classidx_neg[i])-=mean_neg;
114 
115  //mean pos
116  for(i=0; i<num_pos; i++)
117  mean_pos+=fmatrix.col(classidx_pos[i]);
118  mean_pos/=(float64_t)num_pos;
119 
120  // get m(+ve) - mean(+ve)
121  for(i=0; i<num_pos; i++)
122  fmatrix.col(classidx_pos[i])-=mean_pos;
123 
 // `scatter` is an Eigen view over scatter_matrix's storage, so writing
 // through it fills the SGMatrix in place.
124  SGMatrix<float64_t>scatter_matrix(num_feat, num_feat);
125  Map<MatrixXd> scatter(scatter_matrix.matrix, num_feat, num_feat);
126 
127  if (m_method == FLD_LDA || (m_method==AUTO_LDA && num_vec>num_feat))
128  {
129  // covariance matrix.
130  MatrixXd cov_mat(num_feat, num_feat);
131  cov_mat=fmatrix*fmatrix.transpose();
132  scatter=cov_mat/(num_vec-1);
 // Shrinkage regularization: scatter <- (1-gamma)*scatter +
 // gamma*(trace/num_feat)*I, which keeps the trace and makes the
 // matrix better conditioned for the Cholesky solve below.
133  float64_t trace=scatter.trace();
134  double s=1.0-m_gamma;
135  scatter *=s;
136  scatter.diagonal()+=VectorXd::Constant(num_feat, trace*m_gamma/num_feat);
137 
138  // the usual way
139  // we need to find a Basic Linear Solution of A.x=b for 'x'.
140  // Instead of crudely Inverting A, we go for solve() using Decompositions.
141  // where:
142  // MatrixXd A=scatter;
143  // VectorXd b=mean_pos-mean_neg;
144  // VectorXd x=w;
 // x is a view over w's storage, so the solve writes w directly.
 // NOTE(review): LLT requires positive-definite input; with m_gamma==0
 // and degenerate data the factorization could fail silently -- confirm
 // upstream guarantees or consider LDLT.
145  Map<VectorXd> x(w.vector, num_feat);
146  LLT<MatrixXd> decomposition(scatter);
147  x=decomposition.solve(mean_pos-mean_neg);
148 
149  // get the weights w_neg(for -ve class) and w_pos(for +ve class)
150  VectorXd w_neg=decomposition.solve(mean_neg);
151  VectorXd w_pos=decomposition.solve(mean_pos);
152 
153  // get the bias.
154  bias=0.5*(w_neg.dot(mean_neg)-w_pos.dot(mean_pos));
155  }
156 
157  else
158  {
159  //for algorithmic detail, please refer to section 16.3.1. of Bayesian
160  //Reasoning and Machine Learning by David Barber.
161 
162  //we will perform SVD here.
 // fmatrix1 maps the ORIGINAL (uncentred) feature storage; fmatrix
 // above was a deep copy, so the centring did not affect this data.
163  MatrixXd fmatrix1=Map<MatrixXd>(feature_matrix.matrix, num_feat, num_vec);
164 
165  // to hold the centered positive and negative class data
166  MatrixXd cen_pos(num_feat,num_pos);
167  MatrixXd cen_neg(num_feat,num_neg);
168 
169  for(i=0; i<num_pos;i++)
170  cen_pos.col(i)=fmatrix.col(classidx_pos[i]);
171 
172  for(i=0; i<num_neg;i++)
173  cen_neg.col(i)=fmatrix.col(classidx_neg[i]);
174 
175  //+ve covariance matrix
176  cen_pos=cen_pos*cen_pos.transpose()/(float64_t(num_pos-1));
177 
178  //-ve covariance matrix
179  cen_neg=cen_neg*cen_neg.transpose()/(float64_t(num_neg-1));
180 
 // Within-class scatter, then the same trace-preserving shrinkage
 // regularization as in the FLD branch above.
181  //within class matrix
182  MatrixXd Sw= num_pos*cen_pos+num_neg*cen_neg;
183  float64_t trace=Sw.trace();
184  double s=1.0-m_gamma;
185  Sw *=s;
186  Sw.diagonal()+=VectorXd::Constant(num_feat, trace*m_gamma/num_feat);
187 
188  //total mean
189  VectorXd mean_total=(num_pos*mean_pos+num_neg*mean_neg)/(float64_t)num_vec;
190 
 // Between-class scatter represented by its two (num_feat x 1) factors:
 // Sb is rebuilt as Q' * (Sb * Sb') * Q further below.
191  //between class matrix
192  MatrixXd Sb(num_feat,2);
193  Sb.col(0)=sqrt(num_pos)*(mean_pos-mean_total);
194  Sb.col(1)=sqrt(num_neg)*(mean_neg-mean_total);
195 
 // Thin SVD of the raw data: columns of U span the data, giving a
 // reduced basis Q in which to solve the problem.
196  JacobiSVD<MatrixXd> svd(fmatrix1, ComputeThinU);
197 
198  // basis to represent the solution
199  MatrixXd Q=svd.matrixU();
200  // modified between class scatter
201  Sb=Q.transpose()*(Sb*(Sb.transpose()))*Q;
202 
203  // modified within class scatter
204  Sw=Q.transpose()*Sw*Q;
205 
206  // to find SVD((inverse(Chol(Sw)))' * Sb * (inverse(Chol(Sw))))
207  //1.get Cw=Chol(Sw)
208  //find the decomposition of Cw'.
209  HouseholderQR<MatrixXd> decomposition(Sw.llt().matrixU().transpose());
210 
211  //2.get P=inv(Cw')*Sb_new
212  //MatrixXd P=decomposition.solve(Sb);
213  //3. final value to be put in SVD will be therefore:
214  // final_ output =(inv(Cw')*(P'))'
215  JacobiSVD<MatrixXd> svd2(decomposition.solve((decomposition.solve(Sb))
216  .transpose()).transpose(), ComputeThinU);
217 
218  // Since this is a linear classifier, with only binary classes,
219  // we need to keep only the 1st eigenvector.
 // Map the leading singular vector back from the reduced basis into the
 // original feature space; x writes directly into w's storage.
220  Map<VectorXd> x(w.vector, num_feat);
221  x=Q*(svd2.matrixU().col(0));
222  // get the bias
223  bias=(x.transpose()*mean_total);
224  bias=bias*(-1);
225  }
226  return true;
227 }
virtual const char * get_name() const =0
virtual bool train_machine(CFeatures *data=NULL)
Definition: LDA.cpp:58
virtual ELabelType get_label_type() const =0
binary labels +1/-1
Definition: LabelTypes.h:18
The class Labels models labels, i.e. class assignments of objects.
Definition: Labels.h:43
Definition: SGMatrix.h:20
CLabels * m_labels
Definition: Machine.h:361
#define SG_ERROR(...)
Definition: SGIO.h:129
#define REQUIRE(x,...)
Definition: SGIO.h:206
index_t num_cols
Definition: SGMatrix.h:376
virtual void set_features(CDotFeatures *feat)
Definition: LDA.h:143
Features that support dot products among other operations.
Definition: DotFeatures.h:44
index_t num_rows
Definition: SGMatrix.h:374
index_t vlen
Definition: SGVector.h:492
CLDA(float64_t gamma=0, ELDAMethod method=AUTO_LDA)
Definition: LDA.cpp:25
double float64_t
Definition: common.h:50
SGVector< float64_t > w
float64_t m_gamma
Definition: LDA.h:171
Class LinearMachine is a generic interface for all kinds of linear machines like classifiers.
Definition: LinearMachine.h:63
ELDAMethod
Definition: LDA.h:26
CDotFeatures * features
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual ~CLDA()
Definition: LDA.cpp:54
int machine_int_t
Definition: common.h:59
The class Features is the base class of all feature objects.
Definition: Features.h:68
void init()
Definition: LDA.cpp:44
ELDAMethod m_method
Definition: LDA.h:173
Binary Labels for binary classification.
Definition: BinaryLabels.h:37
#define SG_ADD(...)
Definition: SGObject.h:81
bool has_property(EFeatureProperty p) const
Definition: Features.cpp:295
virtual void set_labels(CLabels *lab)
Definition: Machine.cpp:65

SHOGUN Machine Learning Toolbox - Documentation