SHOGUN  4.2.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
QDA.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2012 Fernando José Iglesias García
8  * Copyright (C) 2012 Fernando José Iglesias García
9  */
10 
11 #include <shogun/lib/common.h>
12 
13 
14 #include <shogun/multiclass/QDA.h>
17 #include <shogun/labels/Labels.h>
20 
22 
23 using namespace shogun;
24 using namespace Eigen;
25 
26 CQDA::CQDA() : CNativeMulticlassMachine(), m_num_classes(0), m_dim(0)
27 {
28  init();
29 }
30 
31 CQDA::CQDA(float64_t tolerance, bool store_covs)
32 : CNativeMulticlassMachine(), m_num_classes(0), m_dim(0)
33 {
34  init();
35  m_tolerance = tolerance;
36  m_store_covs = store_covs;
37 }
38 
// NOTE(review): the signature line (original line 39) is missing from this
// extract; from the initializer list and body this is presumably the
// CQDA(CDenseFeatures<float64_t>* traindat, CLabels* trainlab) constructor,
// matching the siblings below -- confirm against the original file.
40 : CNativeMulticlassMachine(), m_num_classes(0), m_dim(0)
41 {
42  init();
// Hand the training features and labels to the machine (both setters live in
// the base class / header; see QDA.h for set_features).
43  set_features(traindat);
44  set_labels(trainlab);
45 }
46 
47 CQDA::CQDA(CDenseFeatures<float64_t>* traindat, CLabels* trainlab, float64_t tolerance)
48 : CNativeMulticlassMachine(), m_num_classes(0), m_dim(0)
49 {
50  init();
51  set_features(traindat);
52  set_labels(trainlab);
53  m_tolerance = tolerance;
54 }
55 
56 CQDA::CQDA(CDenseFeatures<float64_t>* traindat, CLabels* trainlab, bool store_covs)
57 : CNativeMulticlassMachine(), m_num_classes(0), m_dim(0)
58 {
59  init();
60  set_features(traindat);
61  set_labels(trainlab);
62  m_store_covs = store_covs;
63 }
64 
65 
66 
67 CQDA::CQDA(CDenseFeatures<float64_t>* traindat, CLabels* trainlab, float64_t tolerance, bool store_covs)
68 : CNativeMulticlassMachine(), m_num_classes(0), m_dim(0)
69 {
70  init();
71  set_features(traindat);
72  set_labels(trainlab);
73  m_tolerance = tolerance;
74  m_store_covs = store_covs;
75 }
76 
// NOTE(review): the destructor signature (QDA.cpp line 77; the cross-reference
// section identifies it as virtual ~CQDA()) is missing from this extract.
78 {
// Drop the reference this machine holds on its feature object.
79  SG_UNREF(m_features);
80 
// Release training state (means, class count).
81  cleanup();
82 }
83 
84 void CQDA::init()
85 {
86  m_tolerance = 1e-4;
87  m_store_covs = false;
88  SG_ADD(&m_tolerance, "m_tolerance", "Tolerance member.", MS_AVAILABLE);
89  SG_ADD(&m_store_covs, "m_store_covs", "Store covariances member", MS_NOT_AVAILABLE);
90  SG_ADD((CSGObject**) &m_features, "m_features", "Feature object.", MS_NOT_AVAILABLE);
91  SG_ADD(&m_means, "m_means", "Mean vectors list", MS_NOT_AVAILABLE);
92  SG_ADD(&m_slog, "m_slog", "Vector used in classification", MS_NOT_AVAILABLE);
93 
94  //TODO include SGNDArray objects for serialization
95 
96  m_features = NULL;
97 }
98 
99 void CQDA::cleanup()
100 {
101  m_means=SGMatrix<float64_t>();
102 
103  m_num_classes = 0;
104 }
105 
// Classify every feature vector with the trained QDA model.
// NOTE(review): the signature (QDA.cpp line 106; the cross-reference section
// gives virtual CMulticlassLabels* apply_multiclass(CFeatures* data = NULL))
// is missing from this extract.
107 {
// Optionally replace the current features; only dot-features are accepted.
108  if (data)
109  {
110  if (!data->has_property(FP_DOT))
111  SG_ERROR("Specified features are not of type CDotFeatures\n")
112 
113  set_features((CDotFeatures*) data);
114  }
115 
// Nothing to classify without features.
116  if ( !m_features )
117  return NULL;
118 
119  int32_t num_vecs = m_features->get_num_vectors();
120  ASSERT(num_vecs > 0)
// Feature dimensionality must match the dimension used at training time.
121  ASSERT( m_dim == m_features->get_dim_feature_space() )
122 
// NOTE(review): original line 123-124, presumably defining `rf` (a
// CDenseFeatures<float64_t>* view of m_features, used below), is missing
// from this extract -- confirm against the original file.
124 
// X: mean-centred examples; A = X * M_k; norm2 holds, column-major as
// norm2[i + k*num_vecs], the squared norm of A.row(i) per class k.
125  MatrixXd X(num_vecs, m_dim);
126  MatrixXd A(num_vecs, m_dim);
127  VectorXd norm2(num_vecs*m_num_classes);
128  norm2.setZero();
129 
130  int32_t vlen;
131  bool vfree;
132  float64_t* vec;
133  for (int k = 0; k < m_num_classes; k++)
134  {
135  // X = features - means
136  for (int i = 0; i < num_vecs; i++)
137  {
138  vec = rf->get_feature_vector(i, vlen, vfree);
139  ASSERT(vec)
140 
// Map the raw buffers into Eigen vectors without copying.
141  Map< VectorXd > Evec(vec,vlen);
142  Map< VectorXd > Em_means_col(m_means.get_column_vector(k), m_dim);
143 
144  X.row(i) = Evec - Em_means_col;
145 
146  rf->free_feature_vector(vec, i, vfree);
147  }
148 
// Project the centred data through the per-class matrix m_M[k] computed at
// training time.
149  Map< MatrixXd > Em_M(m_M.get_matrix(k), m_dim, m_dim);
150  A = X*Em_M;
151 
152  for (int i = 0; i < num_vecs; i++)
153  norm2(i + k*num_vecs) = A.row(i).array().square().sum();
154 
155 #ifdef DEBUG_QDA
156  SG_PRINT("\n>>> Displaying A ...\n")
157  SGMatrix< float64_t >::display_matrix(A.data(), num_vecs, m_dim);
158 #endif
159  }
160 
// Turn each squared distance into a (unnormalised) log-likelihood score:
// -0.5 * (dist^2 + m_slog[k]).
161  for (int i = 0; i < num_vecs; i++)
162  for (int k = 0; k < m_num_classes; k++)
163  {
164  norm2[i + k*num_vecs] += m_slog[k];
165  norm2[i + k*num_vecs] *= -0.5;
166  }
167 
168 #ifdef DEBUG_QDA
169  SG_PRINT("\n>>> Displaying norm2 ...\n")
170  SGMatrix< float64_t >::display_matrix(norm2.data(), num_vecs, m_num_classes);
171 #endif
172 
// Pick, for every example i, the class with the highest score; arg_max walks
// norm2 starting at offset i with stride num_vecs to visit all classes.
// The caller takes ownership of the returned labels object.
173  CMulticlassLabels* out = new CMulticlassLabels(num_vecs);
174 
175  for (int i = 0 ; i < num_vecs; i++)
176  out->set_label(i, CMath::arg_max(norm2.data()+i, num_vecs, m_num_classes));
177 
178  return out;
179 }
180 
// Train the QDA model: per-class means, SVD-based whitening transforms m_M,
// log-determinant terms m_slog, and (optionally) covariance matrices.
// NOTE(review): the signature (QDA.cpp line 181; the cross-reference section
// gives virtual bool train_machine(CFeatures* data = NULL)) is missing from
// this extract.
182 {
// Sanity checks: labels and dot-features must be present before training.
183  if (!m_labels)
184  SG_ERROR("No labels allocated in QDA training\n")
185 
// NOTE(review): "Speficied" in the message below is a typo for "Specified";
// fixing it is a code change, so it is only flagged here.
186  if ( data )
187  {
188  if (!data->has_property(FP_DOT))
189  SG_ERROR("Speficied features are not of type CDotFeatures\n")
190 
191  set_features((CDotFeatures*) data);
192  }
193 
194  if (!m_features)
195  SG_ERROR("No features allocated in QDA training\n")
196 
197  SGVector< int32_t > train_labels = ((CMulticlassLabels*) m_labels)->get_int_labels();
198 
199  if (!train_labels.vector)
200  SG_ERROR("No train_labels allocated in QDA training\n")
201 
// Reset any state left over from a previous training run.
202  cleanup();
203 
204  m_num_classes = ((CMulticlassLabels*) m_labels)->get_num_classes();
205  m_dim = m_features->get_dim_feature_space();
206  int32_t num_vec = m_features->get_num_vectors();
207 
208  if (num_vec != train_labels.vlen)
209  SG_ERROR("Dimension mismatch between features and labels in QDA training")
210 
// class_idxs[k*num_vec + j] = index of the j-th example of class k;
// class_nums[k] = number of examples of class k.
// NOTE(review): the trailing comment on the next line actually describes
// class_nums, not class_idxs.
211  int32_t* class_idxs = SG_MALLOC(int32_t, num_vec*m_num_classes); // number of examples of each class
212  int32_t* class_nums = SG_MALLOC(int32_t, m_num_classes);
213  memset(class_nums, 0, m_num_classes*sizeof(int32_t));
214  int32_t class_idx;
215 
// Bucket example indices by class; labels must lie in {0,...,num_classes-1}.
// NOTE(review): class_idxs/class_nums are not freed on the early-return error
// paths below -- a leak unless SG_ERROR aborts unconditionally; confirm.
216  for (int i = 0; i < train_labels.vlen; i++)
217  {
218  class_idx = train_labels.vector[i];
219 
220  if (class_idx < 0 || class_idx >= m_num_classes)
221  {
222  SG_ERROR("found label out of {0, 1, 2, ..., num_classes-1}...")
223  return false;
224  }
225  else
226  {
227  class_idxs[ class_idx*num_vec + class_nums[class_idx]++ ] = i;
228  }
229  }
230 
// Every class needs at least one training example.
231  for (int i = 0; i < m_num_classes; i++)
232  {
233  if (class_nums[i] <= 0)
234  {
235  SG_ERROR("What? One class with no elements\n")
236  return false;
237  }
238  }
239 
// Optionally allocate an m_dim x m_dim x num_classes array for the per-class
// covariance matrices.
240  if (m_store_covs)
241  {
242  // cov_dims will be free in m_covs.destroy_ndarray()
243  index_t * cov_dims = SG_MALLOC(index_t, 3);
244  cov_dims[0] = m_dim;
245  cov_dims[1] = m_dim;
246  cov_dims[2] = m_num_classes;
247  m_covs = SGNDArray< float64_t >(cov_dims, 3);
248  }
249 
// Per-class means (one column per class) and per-class singular-value
// scalings.
250  m_means = SGMatrix< float64_t >(m_dim, m_num_classes, true);
251  SGMatrix< float64_t > scalings = SGMatrix< float64_t >(m_dim, m_num_classes);
252 
253  // rot_dims will be freed in rotations.destroy_ndarray()
254  index_t* rot_dims = SG_MALLOC(index_t, 3);
255  rot_dims[0] = m_dim;
256  rot_dims[1] = m_dim;
257  rot_dims[2] = m_num_classes;
258  SGNDArray< float64_t > rotations = SGNDArray< float64_t >(rot_dims, 3);
259 
// NOTE(review): original line 260-261, presumably defining `rf` (a
// CDenseFeatures<float64_t>* view of m_features, used in the loop below),
// is missing from this extract -- confirm against the original file.
261 
262  m_means.zero();
263 
264  int32_t vlen;
265  bool vfree;
266  float64_t* vec;
// Per class: accumulate the mean, centre the class examples, then take an SVD
// of the centred data to obtain rotations (right singular vectors V) and
// scalings (squared singular values normalised by n_k - 1, i.e. variances).
267  for (int k = 0; k < m_num_classes; k++)
268  {
269  MatrixXd buffer(class_nums[k], m_dim);
270  Map< VectorXd > Em_means(m_means.get_column_vector(k), m_dim);
271  for (int i = 0; i < class_nums[k]; i++)
272  {
273  vec = rf->get_feature_vector(class_idxs[k*num_vec + i], vlen, vfree);
274  ASSERT(vec)
275 
276  Map< VectorXd > Evec(vec, vlen);
277  Em_means += Evec;
278  buffer.row(i) = Evec;
279 
280  rf->free_feature_vector(vec, class_idxs[k*num_vec + i], vfree);
281  }
282 
283  Em_means /= class_nums[k];
284 
285  for (int i = 0; i < class_nums[k]; i++)
286  buffer.row(i) -= Em_means;
287 
288  // SVD
289  float64_t * col = scalings.get_column_vector(k);
290  float64_t * rot_mat = rotations.get_matrix(k);
291 
292  Eigen::JacobiSVD<MatrixXd> eSvd;
293  eSvd.compute(buffer,Eigen::ComputeFullV);
294  memcpy(col, eSvd.singularValues().data(), m_dim*sizeof(float64_t));
295  memcpy(rot_mat, eSvd.matrixV().data(), m_dim*m_dim*sizeof(float64_t));
296 
// col <- col^2 / (n_k - 1): per-dimension variances in the rotated basis.
297  SGVector<float64_t>::vector_multiply(col, col, col, m_dim);
298  SGVector<float64_t>::scale_vector(1.0/(class_nums[k]-1), col, m_dim);
299 
300  if (m_store_covs)
301  {
// NOTE(review): this branch looks buggy -- M is allocated but never
// initialised, and the loop scales M (not MEig) in place, so the scaling
// never reaches resE = MEig * rotE^T stored below. Presumably MEig's columns
// were meant to be scaled. Confirm against upstream before changing.
302  SGMatrix< float64_t > M(m_dim ,m_dim);
303  MatrixXd MEig = Map<MatrixXd>(rot_mat,m_dim,m_dim);
304  for (int i = 0; i < m_dim; i++)
305  for (int j = 0; j < m_dim; j++)
306  M(i,j)*=scalings[k*m_dim + j];
307  MatrixXd rotE = Map<MatrixXd>(rot_mat,m_dim,m_dim);
308  MatrixXd resE(m_dim,m_dim);
309  resE = MEig * rotE.transpose();
310  memcpy(m_covs.get_matrix(k),resE.data(),m_dim*m_dim*sizeof(float64_t));
311  }
312  }
313 
314  /* Computation of terms required for classification */
// sinvsqrt[j] = 1/sqrt(variance_j) whitens each rotated dimension.
315  SGVector< float32_t > sinvsqrt(m_dim);
316 
317  // M_dims will be freed in m_M.destroy_ndarray()
318  index_t* M_dims = SG_MALLOC(index_t, 3);
319  M_dims[0] = m_dim;
320  M_dims[1] = m_dim;
321  M_dims[2] = m_num_classes;
322  m_M = SGNDArray< float64_t >(M_dims, 3);
323 
// m_slog[k] accumulates sum_j log(scaling_kj) (the log-determinant term used
// by apply_multiclass).
324  m_slog = SGVector< float32_t >(m_num_classes);
325  m_slog.zero();
326 
327  index_t idx = 0;
328  for (int k = 0; k < m_num_classes; k++)
329  {
330  for (int j = 0; j < m_dim; j++)
331  {
332  sinvsqrt[j] = 1.0 / CMath::sqrt(scalings[k*m_dim + j]);
333  m_slog[k] += CMath::log(scalings[k*m_dim + j]);
334  }
335 
// m_M[k] = rotations[k] with column j scaled by 1/sqrt(scaling_kj): the
// whitening transform applied to centred data at classification time.
336  for (int i = 0; i < m_dim; i++)
337  for (int j = 0; j < m_dim; j++)
338  {
339  idx = k*m_dim*m_dim + i + j*m_dim;
340  m_M[idx] = rotations[idx] * sinvsqrt[j];
341  }
342  }
343 
344 #ifdef DEBUG_QDA
345  SG_PRINT(">>> QDA machine trained with %d classes\n", m_num_classes)
346 
347  SG_PRINT("\n>>> Displaying means ...\n")
348  SGMatrix< float64_t >::display_matrix(m_means.matrix, m_dim, m_num_classes);
349 
350  SG_PRINT("\n>>> Displaying scalings ...\n")
351  SGMatrix< float64_t >::display_matrix(scalings.matrix, m_dim, m_num_classes);
352 
353  SG_PRINT("\n>>> Displaying rotations ... \n")
354  for (int k = 0; k < m_num_classes; k++)
355  SGMatrix< float64_t >::display_matrix(rotations.get_matrix(k), m_dim, m_dim);
356 
357  SG_PRINT("\n>>> Displaying sinvsqrt ... \n")
358  sinvsqrt.display_vector();
359 
360  SG_PRINT("\n>>> Diplaying m_M matrices ... \n")
361  for (int k = 0; k < m_num_classes; k++)
// NOTE(review): original line 362 (presumably the display_matrix call that is
// the body of the loop above) is missing from this extract.
363 
364  SG_PRINT("\n>>> Exit DEBUG_QDA\n")
365 #endif
366 
367  SG_FREE(class_idxs);
368  SG_FREE(class_nums);
369  return true;
370 }
void display_matrix(const char *name="matrix") const
Definition: SGMatrix.cpp:390
static int32_t arg_max(T *vec, int32_t inc, int32_t len, T *maxv_ptr=NULL)
Definition: Math.h:262
experimental abstract native multiclass machine class
int32_t index_t
Definition: common.h:62
The class Labels models labels, i.e. class assignments of objects.
Definition: Labels.h:43
Definition: SGMatrix.h:20
virtual int32_t get_num_vectors() const =0
CLabels * m_labels
Definition: Machine.h:361
#define SG_ERROR(...)
Definition: SGIO.h:129
Features that support dot products among other operations.
Definition: DotFeatures.h:44
T * get_matrix(index_t matIdx) const
Definition: SGNDArray.h:72
virtual int32_t get_dim_feature_space() const =0
static void scale_vector(T alpha, T *vec, int32_t len)
Scale vector inplace.
Definition: SGVector.cpp:820
void display_vector(const char *name="vector", const char *prefix="") const
Definition: SGVector.cpp:354
Multiclass Labels for multi-class classification.
index_t vlen
Definition: SGVector.h:492
#define SG_PRINT(...)
Definition: SGIO.h:137
#define ASSERT(x)
Definition: SGIO.h:201
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:115
virtual CMulticlassLabels * apply_multiclass(CFeatures *data=NULL)
Definition: QDA.cpp:106
static void vector_multiply(T *target, const T *v1, const T *v2, int32_t len)
Compute vector multiplication.
Definition: SGVector.h:324
double float64_t
Definition: common.h:50
T * get_column_vector(index_t col) const
Definition: SGMatrix.h:113
#define SG_UNREF(x)
Definition: SGObject.h:55
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual void set_features(CDotFeatures *feat)
Definition: QDA.h:125
The class Features is the base class of all feature objects.
Definition: Features.h:68
virtual ~CQDA()
Definition: QDA.cpp:77
static float64_t log(float64_t v)
Definition: Math.h:922
virtual bool train_machine(CFeatures *data=NULL)
Definition: QDA.cpp:181
#define SG_ADD(...)
Definition: SGObject.h:84
static float32_t sqrt(float32_t x)
Definition: Math.h:459
bool has_property(EFeatureProperty p) const
Definition: Features.cpp:295
virtual void set_labels(CLabels *lab)

SHOGUN Machine Learning Toolbox - Documentation