SHOGUN  4.1.0
MCLDA.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2013 Kevin Hughes
 * Copyright (C) 2013 Kevin Hughes
 *
 * Thanks to Fernando Jose Iglesias Garcia (shogun)
 * and Matthieu Perrot (scikit-learn)
 */

#include <shogun/lib/common.h>

#ifdef HAVE_EIGEN3

#include <shogun/multiclass/MCLDA.h>
#include <shogun/labels/Labels.h>
#include <shogun/labels/MulticlassLabels.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/eigen3.h>

using namespace shogun;
using namespace Eigen;

CMCLDA::CMCLDA(float64_t tolerance, bool store_cov)
: CNativeMulticlassMachine()
{
    init();
    m_tolerance = tolerance;
    m_store_cov = store_cov;
}

CMCLDA::CMCLDA(CDenseFeatures<float64_t>* traindat, CLabels* trainlab, float64_t tolerance, bool store_cov)
: CNativeMulticlassMachine()
{
    init();

    m_tolerance = tolerance;
    m_store_cov = store_cov;

    set_features(traindat);
    set_labels(trainlab);
}

CMCLDA::~CMCLDA()
{
    SG_UNREF(m_features);

    cleanup();
}

void CMCLDA::init()
{
    SG_ADD(&m_tolerance, "m_tolerance", "Tolerance member.", MS_AVAILABLE);
    SG_ADD(&m_store_cov, "m_store_cov", "Store covariance member", MS_NOT_AVAILABLE);
    SG_ADD((CSGObject**) &m_features, "m_features", "Feature object.", MS_NOT_AVAILABLE);
    SG_ADD(&m_means, "m_means", "Mean vectors list", MS_NOT_AVAILABLE);
    SG_ADD(&m_cov, "m_cov", "covariance matrix", MS_NOT_AVAILABLE);
    SG_ADD(&m_xbar, "m_xbar", "total mean", MS_NOT_AVAILABLE);
    SG_ADD(&m_scalings, "m_scalings", "scalings", MS_NOT_AVAILABLE);
    SG_ADD(&m_rank, "m_rank", "rank", MS_NOT_AVAILABLE);
    SG_ADD(&m_coef, "m_coef", "weight vector", MS_NOT_AVAILABLE);
    SG_ADD(&m_intercept, "m_intercept", "intercept", MS_NOT_AVAILABLE);

    m_features = NULL;
    m_num_classes = 0;
    m_dim = 0;
    m_rank = 0;
}

void CMCLDA::cleanup()
{
    m_num_classes = 0;
}

CMulticlassLabels* CMCLDA::apply_multiclass(CFeatures* data)
{
    if (data)
    {
        if (!data->has_property(FP_DOT))
            SG_ERROR("Specified features are not of type CDotFeatures\n")

        set_features((CDotFeatures*) data);
    }

    if (!m_features)
        return NULL;

    int32_t num_vecs = m_features->get_num_vectors();
    ASSERT(num_vecs > 0)
    ASSERT(m_dim == m_features->get_dim_feature_space());

    // collect features into a matrix
    CDenseFeatures< float64_t >* rf = (CDenseFeatures< float64_t >*) m_features;

    MatrixXd X(num_vecs, m_dim);

    int32_t vlen;
    bool vfree;
    float64_t* vec;
    Map< VectorXd > Em_xbar(m_xbar, m_dim);
    for (int i = 0; i < num_vecs; i++)
    {
        vec = rf->get_feature_vector(i, vlen, vfree);
        ASSERT(vec)

        Map< VectorXd > Evec(vec, vlen);

        X.row(i) = Evec - Em_xbar;

        rf->free_feature_vector(vec, i, vfree);
    }

#ifdef DEBUG_MCLDA
    SG_PRINT("\n>>> Displaying X ...\n")
    SGMatrix< float64_t >::display_matrix(X.data(), num_vecs, m_dim);
#endif
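
    // Each test vector is centered on the stored training mean (m_xbar) and then
    // projected into the m_rank-dimensional discriminant subspace via m_scalings;
    // the class scores below are linear functions of that projection.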

    // center and scale data
    MatrixXd Xs(num_vecs, m_rank);
    Map< MatrixXd > Em_scalings(m_scalings.matrix, m_dim, m_rank);
    Xs = X*Em_scalings;

#ifdef DEBUG_MCLDA
    SG_PRINT("\n>>> Displaying Xs ...\n")
    SGMatrix< float64_t >::display_matrix(Xs.data(), num_vecs, m_rank);
#endif

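    // The decision value for vector i and class k is the dot product of the
    // projected vector with the class weight vector plus the class intercept:
    // d(i,k) = Xs.row(i) * m_coef.row(k)^T + m_intercept[k].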
    // decision function
    MatrixXd d(num_vecs, m_num_classes);
    Map< MatrixXd > Em_coef(m_coef.matrix, m_num_classes, m_rank);
    Map< VectorXd > Em_intercept(m_intercept.vector, m_num_classes);
    d = (Xs*Em_coef.transpose()).rowwise() + Em_intercept.transpose();

#ifdef DEBUG_MCLDA
    SG_PRINT("\n>>> Displaying d ...\n")
    SGMatrix< float64_t >::display_matrix(d.data(), num_vecs, m_num_classes);
#endif

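    // d is stored column-major (Eigen's default), so row i is traversed with a
    // stride of num_vecs; CMath::arg_max takes that stride as its second argument.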
    // argmax to apply labels
    CMulticlassLabels* out = new CMulticlassLabels(num_vecs);
    for (int i = 0; i < num_vecs; i++)
        out->set_label(i, CMath::arg_max(d.data()+i, num_vecs, m_num_classes));

    return out;
}

bool CMCLDA::train_machine(CFeatures* data)
{
    if (!m_labels)
        SG_ERROR("No labels allocated in MCLDA training\n")

    if (data)
    {
        if (!data->has_property(FP_DOT))
            SG_ERROR("Specified features are not of type CDotFeatures\n")

        set_features((CDotFeatures*) data);
    }

    if (!m_features)
        SG_ERROR("No features allocated in MCLDA training\n")

    SGVector< int32_t > train_labels = ((CMulticlassLabels*) m_labels)->get_int_labels();

    if (!train_labels.vector)
        SG_ERROR("No train_labels allocated in MCLDA training\n")

    cleanup();

    m_num_classes = ((CMulticlassLabels*) m_labels)->get_num_classes();
    m_dim = m_features->get_dim_feature_space();
    int32_t num_vec = m_features->get_num_vectors();

    if (num_vec != train_labels.vlen)
        SG_ERROR("Dimension mismatch between features and labels in MCLDA training\n")

    int32_t* class_idxs = SG_MALLOC(int32_t, num_vec*m_num_classes);
    int32_t* class_nums = SG_MALLOC(int32_t, m_num_classes); // number of examples of each class
    memset(class_nums, 0, m_num_classes*sizeof(int32_t));

    for (int i = 0; i < train_labels.vlen; i++)
    {
        int32_t class_idx = train_labels.vector[i];

        if (class_idx < 0 || class_idx >= m_num_classes)
        {
            SG_ERROR("Found label outside of {0, 1, 2, ..., num_classes-1}\n")
            return false;
        }
        else
        {
            class_idxs[ class_idx*num_vec + class_nums[class_idx]++ ] = i;
        }
    }

    for (int i = 0; i < m_num_classes; i++)
    {
        if (class_nums[i] <= 0)
        {
            SG_ERROR("Found a class with no training examples\n")
            return false;
        }
    }

    CDenseFeatures< float64_t >* rf = (CDenseFeatures< float64_t >*) m_features;

    // if ( m_store_cov )
    index_t * cov_dims = SG_MALLOC(index_t, 3);
    cov_dims[0] = m_dim;
    cov_dims[1] = m_dim;
    cov_dims[2] = m_num_classes;
    SGNDArray< float64_t > covs(cov_dims, 3);

    m_means = SGMatrix< float64_t >(m_dim, m_num_classes, true);

    // matrix of all samples
    MatrixXd X = MatrixXd::Zero(num_vec, m_dim);
    int32_t iX = 0;

    m_means.zero();
    m_cov.zero();

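    // For each class k: gather its samples, compute the class mean, center the
    // class samples on that mean, and append the centered rows to X (so X holds
    // within-class deviations). Per-class scatter is kept only if m_store_cov.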
    int32_t vlen;
    bool vfree;
    float64_t* vec;
    for (int k = 0; k < m_num_classes; k++)
    {
        // gather all the samples for class k into buffer and calculate the mean of class k
        MatrixXd buffer(class_nums[k], m_dim);
        Map< VectorXd > Em_mean(m_means.get_column_vector(k), m_dim);
        for (int i = 0; i < class_nums[k]; i++)
        {
            vec = rf->get_feature_vector(class_idxs[k*num_vec + i], vlen, vfree);
            ASSERT(vec)

            Map< VectorXd > Evec(vec, vlen);
            Em_mean += Evec;
            buffer.row(i) = Evec;

            rf->free_feature_vector(vec, class_idxs[k*num_vec + i], vfree);
        }

        Em_mean /= class_nums[k];

        // subtract the mean of class k from each sample of class k and store the centered data in X
        for (int i = 0; i < class_nums[k]; i++)
        {
            buffer.row(i) -= Em_mean;
            X.row(iX) += buffer.row(i);
            iX++;
        }

        if (m_store_cov)
        {
            // calc cov = buffer.T * buffer
            Map< MatrixXd > Em_cov_k(covs.get_matrix(k), m_dim, m_dim);
            Em_cov_k = buffer.transpose() * buffer;
        }
    }

#ifdef DEBUG_MCLDA
    SG_PRINT("\n>>> Displaying means ...\n")
    SGMatrix< float64_t >::display_matrix(m_means.matrix, m_dim, m_num_classes);
#endif

    if (m_store_cov)
    {
        m_cov = SGMatrix< float64_t >(m_dim, m_dim, true);
        m_cov.zero();
        Map< MatrixXd > Em_cov(m_cov.matrix, m_dim, m_dim);

        for (int k = 0; k < m_num_classes; k++)
        {
            Map< MatrixXd > Em_cov_k(covs.get_matrix(k), m_dim, m_dim);
            Em_cov += Em_cov_k;
        }

        Em_cov /= m_num_classes;
    }

#ifdef DEBUG_MCLDA
    if (m_store_cov)
    {
        SG_PRINT("\n>>> Displaying cov ...\n")
        SGMatrix< float64_t >::display_matrix(m_cov.matrix, m_dim, m_dim);
    }
#endif

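    // The solver below follows the three-step, SVD-based LDA scheme credited to
    // scikit-learn in the file header:
    //   1) a per-feature ("within") scaling derived from the class-centered data,
    //   2) an SVD of the scaled, class-centered samples ("within variance scaling"),
    //   3) a second SVD of the projected, weighted class means ("between variance
    //      scaling"), whose leading m_rank directions form the discriminant space.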
    // 1) within (univariate) scaling by within-class std-dev

    // total mean and std-dev of X
    m_xbar = SGVector< float64_t >(m_dim);
    m_xbar.zero();
    Map< VectorXd > Em_xbar(m_xbar.vector, m_dim);
    Em_xbar = X.colwise().sum();
    Em_xbar /= num_vec;

    VectorXd std = VectorXd::Zero(m_dim);
    std = (X.rowwise() - Em_xbar.transpose()).array().pow(2).colwise().sum();
    std = std.array() / num_vec;

    for (int j = 0; j < m_dim; j++)
        if (std[j] == 0)
            std[j] = 1;

    float64_t fac = 1.0 / (num_vec - m_num_classes);

#ifdef DEBUG_MCLDA
    SG_PRINT("\n>>> Displaying m_xbar ...\n")
    SGVector< float64_t >::display_vector(m_xbar.vector, m_dim);

    SG_PRINT("\n>>> Displaying std ...\n")
    SGVector< float64_t >::display_vector(std.data(), m_dim);
#endif

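    // fac = 1/(n - K) is the usual pooled-covariance normalization
    // (num_vec samples, m_num_classes classes); it is applied when the
    // class-centered rows are scaled below and again when the class means
    // are weighted in step 3.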
    // 2) Within variance scaling
    for (int i = 0; i < num_vec; i++)
        X.row(i) = sqrt(fac) * X.row(i).array() / std.transpose().array();

    // SVD of centered (within-)scaled data
    VectorXd S(m_dim);
    MatrixXd V(m_dim, m_dim);

    Eigen::JacobiSVD<MatrixXd> eSvd;
    eSvd.compute(X, Eigen::ComputeFullV);
    memcpy(S.data(), eSvd.singularValues().data(), m_dim*sizeof(float64_t));
    memcpy(V.data(), eSvd.matrixV().data(), m_dim*m_dim*sizeof(float64_t));
    V.transposeInPlace();

    int rank = 0;
    while (rank < m_dim && S[rank] > m_tolerance)
    {
        rank++;
    }

    if (rank < m_dim)
        SG_ERROR("Warning: Variables are collinear\n")

    MatrixXd scalings(m_dim, rank);
    for (int i = 0; i < m_dim; i++)
        for (int j = 0; j < rank; j++)
            scalings(i,j) = V(j,i) / std[j] / S[j];

#ifdef DEBUG_MCLDA
    SG_PRINT("\n>>> Displaying scalings ...\n")
    SGMatrix< float64_t >::display_matrix(scalings.data(), m_dim, rank);
#endif

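    // Project the class means through 'scalings' and weight each row by
    // sqrt(n_k * fac); the singular vectors of this matrix give the directions
    // in which the (weighted) class means differ most, and at most
    // m_num_classes - 1 of them carry between-class information.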
    // 3) Between variance scaling

    // Xc = m_means dot scalings
    MatrixXd Xc(m_num_classes, rank);
    Map< MatrixXd > Em_means(m_means.matrix, m_dim, m_num_classes);
    Xc = (Em_means.transpose()*scalings);

    for (int i = 0; i < m_num_classes; i++)
        Xc.row(i) *= sqrt(class_nums[i] * fac);

    // The centers live in a space of at most n_classes-1 dimensions.
    // Use an SVD to find the projection onto the space spanned by the
    // (n_classes) centers.
    S = VectorXd(rank);
    V = MatrixXd(rank, rank);

    eSvd.compute(Xc, Eigen::ComputeFullV);
    memcpy(S.data(), eSvd.singularValues().data(), rank*sizeof(float64_t));
    memcpy(V.data(), eSvd.matrixV().data(), rank*rank*sizeof(float64_t));

    m_rank = 0;
    while (m_rank < rank && S[m_rank] > m_tolerance*S[0])
    {
        m_rank++;
    }

    // compose the scalings
    m_scalings = SGMatrix< float64_t >(rank, m_rank);
    Map< MatrixXd > Em_scalings(m_scalings.matrix, rank, m_rank);
    Em_scalings = scalings * V.leftCols(m_rank);

#ifdef DEBUG_MCLDA
    SG_PRINT("\n>>> Displaying m_scalings ...\n")
    SGMatrix< float64_t >::display_matrix(m_scalings.matrix, rank, m_rank);
#endif

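    // The per-class weight vectors are obtained by centering the class means on
    // m_xbar and pushing them through the composed scalings, so that in
    // apply_multiclass() each class score reduces to a dot product in the
    // m_rank-dimensional discriminant space.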
    // weight vectors / centroids
    MatrixXd meansc(m_dim, m_num_classes);
    meansc = Em_means.colwise() - Em_xbar;

    m_coef = SGMatrix< float64_t >(m_num_classes, m_rank);
    Map< MatrixXd > Em_coef(m_coef.matrix, m_num_classes, m_rank);
    Em_coef = meansc.transpose() * Em_scalings;

#ifdef DEBUG_MCLDA
    SG_PRINT("\n>>> Displaying m_coefs ...\n")
    SGMatrix< float64_t >::display_matrix(m_coef.matrix, m_num_classes, m_rank);
#endif

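    // The intercept folds the empirical log class prior, log(class_nums[j]/num_vec),
    // into each class score, so more frequent classes receive a correspondingly
    // larger baseline score in apply_multiclass().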
    // intercept
    m_intercept = SGVector< float64_t >(m_num_classes);
    m_intercept.zero();
    for (int j = 0; j < m_num_classes; j++)
        m_intercept[j] = -0.5*m_coef[j]*m_coef[j] + log(class_nums[j]/float(num_vec));

#ifdef DEBUG_MCLDA
    SG_PRINT("\n>>> Displaying m_intercept ...\n")
    SGVector< float64_t >::display_vector(m_intercept.vector, m_num_classes);
#endif

    SG_FREE(class_idxs);
    SG_FREE(class_nums);

    return true;
}

#endif /* HAVE_EIGEN3 */
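
A minimal usage sketch (not part of MCLDA.cpp): it assumes shogun has been initialized with init_shogun_with_defaults(), and that the caller has filled a feature matrix `mat` (one example per column) and a label vector `lab` with entries in {0, ..., num_classes-1}; the names `mat`, `lab`, and `mclda_example` are illustrative only.

#include <shogun/base/init.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/MulticlassLabels.h>
#include <shogun/multiclass/MCLDA.h>

using namespace shogun;

void mclda_example(SGMatrix<float64_t> mat, SGVector<float64_t> lab)
{
    // wrap the raw data in shogun feature/label objects
    CDenseFeatures<float64_t>* feats = new CDenseFeatures<float64_t>(mat);
    CMulticlassLabels* labels = new CMulticlassLabels(lab);

    // tolerance and store_cov mirror the constructor defaults shown above
    CMCLDA* lda = new CMCLDA(feats, labels, 1e-4, false);
    SG_REF(lda);

    lda->train();

    // classify the training points themselves; pass other features to classify new data
    CMulticlassLabels* out = lda->apply_multiclass(feats);

    SG_UNREF(out);
    SG_UNREF(lda);
}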