SHOGUN  v3.0.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
QDA.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2012 Fernando José Iglesias García
8  * Copyright (C) 2012 Fernando José Iglesias García
9  */
10 
11 #include <shogun/lib/common.h>
12 
13 #ifdef HAVE_EIGEN3
14 
15 #include <shogun/multiclass/QDA.h>
18 #include <shogun/labels/Labels.h>
21 
23 
24 using namespace shogun;
25 using namespace Eigen;
26 
27 CQDA::CQDA(float64_t tolerance, bool store_covs)
28 : CNativeMulticlassMachine(), m_tolerance(tolerance),
29  m_store_covs(store_covs), m_num_classes(0), m_dim(0)
30 {
31  init();
32 }
33 
34 CQDA::CQDA(CDenseFeatures<float64_t>* traindat, CLabels* trainlab, float64_t tolerance, bool store_covs)
35 : CNativeMulticlassMachine(), m_tolerance(tolerance), m_store_covs(store_covs), m_num_classes(0), m_dim(0)
36 {
37  init();
38  set_features(traindat);
39  set_labels(trainlab);
40 }
41 
// Destructor body (the `CQDA::~CQDA()` signature line is elided from this
// rendering): drop the reference held on the feature object and release the
// per-class training state via cleanup().
{
	SG_UNREF(m_features);

	cleanup();
}
48 
void CQDA::init()
{
	// Register members with the parameter framework so they take part in
	// serialization; only m_tolerance is exposed to model selection.
	// Registration order plausibly defines the serialized layout — do not
	// reorder these calls.
	SG_ADD(&m_tolerance, "m_tolerance", "Tolerance member.", MS_AVAILABLE);
	SG_ADD(&m_store_covs, "m_store_covs", "Store covariances member", MS_NOT_AVAILABLE);
	SG_ADD((CSGObject**) &m_features, "m_features", "Feature object.", MS_NOT_AVAILABLE);
	SG_ADD(&m_means, "m_means", "Mean vectors list", MS_NOT_AVAILABLE);
	SG_ADD(&m_slog, "m_slog", "Vector used in classification", MS_NOT_AVAILABLE);

	//TODO include SGNDArray objects for serialization

	// NOTE(review): m_features is registered above before being initialized
	// here; SG_ADD presumably records only the member's address (not its
	// value), but confirm against the macro's definition.
	m_features = NULL;
}
61 
62 void CQDA::cleanup()
63 {
64  m_means=SGMatrix<float64_t>();
65 
66  m_num_classes = 0;
67 }
68 
// apply_multiclass body (the method signature line is elided from this
// rendering): classifies every feature vector by evaluating, for each class
// k, the QDA discriminant -0.5*(||(x - mean_k) * M_k||^2 + m_slog[k]) and
// returning the label with the largest value.
{
	if (data)
	{
		if (!data->has_property(FP_DOT))
			SG_ERROR("Specified features are not of type CDotFeatures\n")

		set_features((CDotFeatures*) data);
	}

	// Nothing to classify without features attached.
	if ( !m_features )
		return NULL;

	int32_t num_vecs = m_features->get_num_vectors();
	ASSERT(num_vecs > 0)
	ASSERT( m_dim == m_features->get_dim_feature_space() )

	// NOTE(review): `rf` — presumably a CDenseFeatures<float64_t>* view of
	// m_features — is declared on a line elided from this rendering.

	// X holds the centered test vectors, A the whitened ones; norm2 stores
	// one discriminant value per (vector, class) pair, laid out class-major.
	MatrixXd X(num_vecs, m_dim);
	MatrixXd A(num_vecs, m_dim);
	VectorXd norm2(num_vecs*m_num_classes);
	norm2.setZero();

	int32_t vlen;
	bool vfree;
	float64_t* vec;
	for (int k = 0; k < m_num_classes; k++)
	{
		// X = features - means
		for (int i = 0; i < num_vecs; i++)
		{
			vec = rf->get_feature_vector(i, vlen, vfree);
			ASSERT(vec)

			Map< VectorXd > Evec(vec,vlen);
			Map< VectorXd > Em_means_col(m_means.get_column_vector(k), m_dim);

			X.row(i) = Evec - Em_means_col;

			rf->free_feature_vector(vec, i, vfree);
		}

		// Whiten: per train_machine, m_M(k) = rotations(k) scaled columnwise
		// by 1/sqrt(scalings(k)).
		Map< MatrixXd > Em_M(m_M.get_matrix(k), m_dim, m_dim);
		A = X*Em_M;

		// Squared norm of each whitened row = Mahalanobis distance term.
		for (int i = 0; i < num_vecs; i++)
			norm2(i + k*num_vecs) = A.row(i).array().square().sum();

#ifdef DEBUG_QDA
	SG_PRINT("\n>>> Displaying A ...\n")
	SGMatrix< float64_t >::display_matrix(A.data(), num_vecs, m_dim);
#endif
	}

	// Add the log-determinant term and apply the -0.5 Gaussian factor.
	for (int i = 0; i < num_vecs; i++)
		for (int k = 0; k < m_num_classes; k++)
		{
			norm2[i + k*num_vecs] += m_slog[k];
			norm2[i + k*num_vecs] *= -0.5;
		}

#ifdef DEBUG_QDA
	SG_PRINT("\n>>> Displaying norm2 ...\n")
	SGMatrix< float64_t >::display_matrix(norm2.data(), num_vecs, m_num_classes);
#endif

	CMulticlassLabels* out = new CMulticlassLabels(num_vecs);

	// For vector i the class scores sit at stride num_vecs starting at
	// norm2[i]; arg_max walks them with that stride.
	for (int i = 0 ; i < num_vecs; i++)
		out->set_label(i, SGVector<float64_t>::arg_max(norm2.data()+i, num_vecs, m_num_classes));

	return out;
}
143 
// train_machine body (the method signature line is elided from this
// rendering): fits one Gaussian per class. For each class it accumulates the
// mean, centers the class's vectors, takes an SVD of the centered data to
// obtain per-class scalings (variance spectrum) and rotations (principal
// directions), then precomputes the whitening matrices m_M and the
// log-determinant terms m_slog used by apply_multiclass.
{
	if (!m_labels)
		SG_ERROR("No labels allocated in QDA training\n")

	if ( data )
	{
		if (!data->has_property(FP_DOT))
			// NOTE(review): typo "Speficied" in this user-visible message;
			// left as-is because this is a comment-only pass.
			SG_ERROR("Speficied features are not of type CDotFeatures\n")

		set_features((CDotFeatures*) data);
	}

	if (!m_features)
		SG_ERROR("No features allocated in QDA training\n")

	SGVector< int32_t > train_labels = ((CMulticlassLabels*) m_labels)->get_int_labels();

	if (!train_labels.vector)
		SG_ERROR("No train_labels allocated in QDA training\n")

	cleanup();

	m_num_classes = ((CMulticlassLabels*) m_labels)->get_num_classes();
	m_dim = m_features->get_dim_feature_space();
	int32_t num_vec = m_features->get_num_vectors();

	if (num_vec != train_labels.vlen)
		SG_ERROR("Dimension mismatch between features and labels in QDA training")

	// class_idxs: per class, the indices of its training vectors (class-major,
	// stride num_vec). class_nums: number of examples of each class.
	int32_t* class_idxs = SG_MALLOC(int32_t, num_vec*m_num_classes);
	int32_t* class_nums = SG_MALLOC(int32_t, m_num_classes);
	memset(class_nums, 0, m_num_classes*sizeof(int32_t));
	int32_t class_idx;

	// Bucket every training vector by its label.
	for (int i = 0; i < train_labels.vlen; i++)
	{
		class_idx = train_labels.vector[i];

		if (class_idx < 0 || class_idx >= m_num_classes)
		{
			// NOTE(review): if SG_ERROR returns instead of throwing,
			// class_idxs/class_nums leak on this early-return path — verify.
			SG_ERROR("found label out of {0, 1, 2, ..., num_classes-1}...")
			return false;
		}
		else
		{
			class_idxs[ class_idx*num_vec + class_nums[class_idx]++ ] = i;
		}
	}

	// Every class needs at least one example to estimate mean/covariance.
	for (int i = 0; i < m_num_classes; i++)
	{
		if (class_nums[i] <= 0)
		{
			SG_ERROR("What? One class with no elements\n")
			return false;
		}
	}

	if (m_store_covs)
	{
		// cov_dims will be free in m_covs.destroy_ndarray()
		index_t * cov_dims = SG_MALLOC(index_t, 3);
		cov_dims[0] = m_dim;
		cov_dims[1] = m_dim;
		cov_dims[2] = m_num_classes;
		m_covs = SGNDArray< float64_t >(cov_dims, 3);
	}

	m_means = SGMatrix< float64_t >(m_dim, m_num_classes, true);
	SGMatrix< float64_t > scalings = SGMatrix< float64_t >(m_dim, m_num_classes);

	// rot_dims will be freed in rotations.destroy_ndarray()
	index_t* rot_dims = SG_MALLOC(index_t, 3);
	rot_dims[0] = m_dim;
	rot_dims[1] = m_dim;
	rot_dims[2] = m_num_classes;
	SGNDArray< float64_t > rotations = SGNDArray< float64_t >(rot_dims, 3);

	// NOTE(review): `rf` — presumably a CDenseFeatures<float64_t>* view of
	// m_features — is declared on a line elided from this rendering.

	m_means.zero();

	int32_t vlen;
	bool vfree;
	float64_t* vec;
	for (int k = 0; k < m_num_classes; k++)
	{
		// buffer collects class k's vectors row-wise; Em_means accumulates
		// their sum directly in the k-th column of m_means.
		MatrixXd buffer(class_nums[k], m_dim);
		Map< VectorXd > Em_means(m_means.get_column_vector(k), m_dim);
		for (int i = 0; i < class_nums[k]; i++)
		{
			vec = rf->get_feature_vector(class_idxs[k*num_vec + i], vlen, vfree);
			ASSERT(vec)

			Map< VectorXd > Evec(vec, vlen);
			Em_means += Evec;
			buffer.row(i) = Evec;

			rf->free_feature_vector(vec, class_idxs[k*num_vec + i], vfree);
		}

		// Turn the accumulated sum into the class mean, then center the rows.
		Em_means /= class_nums[k];

		for (int i = 0; i < class_nums[k]; i++)
			buffer.row(i) -= Em_means;

		// SVD
		float64_t * col = scalings.get_column_vector(k);
		float64_t * rot_mat = rotations.get_matrix(k);

		Eigen::JacobiSVD<MatrixXd> eSvd;
		eSvd.compute(buffer,Eigen::ComputeFullV);
		memcpy(col, eSvd.singularValues().data(), m_dim*sizeof(float64_t));
		memcpy(rot_mat, eSvd.matrixV().data(), m_dim*m_dim*sizeof(float64_t));

		// Squared singular values / (n_k - 1) = sample variances along the
		// principal directions.
		SGVector<float64_t>::vector_multiply(col, col, col, m_dim);
		SGVector<float64_t>::scale_vector(1.0/(class_nums[k]-1), col, m_dim);
		rotations.transpose_matrix(k);

		if (m_store_covs)
		{
			SGMatrix< float64_t > M(m_dim ,m_dim);
			MatrixXd MEig = Map<MatrixXd>(rot_mat,m_dim,m_dim);
			// NOTE(review): M is read uninitialized below and is never used
			// afterwards — the columnwise scaling was almost certainly meant
			// to be applied to MEig (MEig(i,j) *= scalings[k*m_dim + j]), so
			// that resE = rot^T * diag(scalings) * rot. As written the stored
			// covariance omits the scaling factors entirely. Verify and fix
			// in a code-changing pass.
			for (int i = 0; i < m_dim; i++)
				for (int j = 0; j < m_dim; j++)
					M(i,j)*=scalings[k*m_dim + j];
			MatrixXd rotE = Map<MatrixXd>(rot_mat,m_dim,m_dim);
			MatrixXd resE(m_dim,m_dim);
			resE = MEig * rotE.transpose();
			memcpy(m_covs.get_matrix(k),resE.data(),m_dim*m_dim*sizeof(float64_t));
		}
	}

	/* Computation of terms required for classification */
	SGVector< float32_t > sinvsqrt(m_dim);

	// M_dims will be freed in m_M.destroy_ndarray()
	index_t* M_dims = SG_MALLOC(index_t, 3);
	M_dims[0] = m_dim;
	M_dims[1] = m_dim;
	M_dims[2] = m_num_classes;
	m_M = SGNDArray< float64_t >(M_dims, 3);

	m_slog = SGVector< float32_t >(m_num_classes);
	m_slog.zero();

	index_t idx = 0;
	for (int k = 0; k < m_num_classes; k++)
	{
		// m_slog[k] = sum_j log(variance_j) = log-determinant of class k's
		// covariance; sinvsqrt holds the inverse standard deviations.
		for (int j = 0; j < m_dim; j++)
		{
			sinvsqrt[j] = 1.0 / CMath::sqrt(scalings[k*m_dim + j]);
			m_slog[k] += CMath::log(scalings[k*m_dim + j]);
		}

		// m_M(k) = rotations(k) with column j scaled by 1/sqrt(variance_j):
		// the whitening transform used in apply_multiclass.
		for (int i = 0; i < m_dim; i++)
			for (int j = 0; j < m_dim; j++)
			{
				idx = k*m_dim*m_dim + i + j*m_dim;
				m_M[idx] = rotations[idx] * sinvsqrt[j];
			}
	}

#ifdef DEBUG_QDA
	SG_PRINT(">>> QDA machine trained with %d classes\n", m_num_classes)

	SG_PRINT("\n>>> Displaying means ...\n")
	SGMatrix< float64_t >::display_matrix(m_means.matrix, m_dim, m_num_classes);

	SG_PRINT("\n>>> Displaying scalings ...\n")
	SGMatrix< float64_t >::display_matrix(scalings.matrix, m_dim, m_num_classes);

	SG_PRINT("\n>>> Displaying rotations ... \n")
	for (int k = 0; k < m_num_classes; k++)
		SGMatrix< float64_t >::display_matrix(rotations.get_matrix(k), m_dim, m_dim);

	SG_PRINT("\n>>> Displaying sinvsqrt ... \n")
	sinvsqrt.display_vector();

	SG_PRINT("\n>>> Diplaying m_M matrices ... \n")
	// NOTE(review): the body line of this loop (presumably a display_matrix
	// call on m_M.get_matrix(k)) is elided from this rendering.
	for (int k = 0; k < m_num_classes; k++)

	SG_PRINT("\n>>> Exit DEBUG_QDA\n")
#endif

	SG_FREE(class_idxs);
	SG_FREE(class_nums);
	return true;
}
335 
336 #endif /* HAVE_EIGEN3 */

SHOGUN Machine Learning Toolbox - Documentation