LeastAngleRegression.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2012 Chiyuan Zhang
 * Copyright (C) 2012 Chiyuan Zhang
 */

#include <shogun/lib/config.h>

#ifdef HAVE_LAPACK

#include <vector>
#include <limits>
#include <algorithm>

#include <shogun/features/DenseFeatures.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/lapack.h>
#include <shogun/regression/LeastAngleRegression.h>
#include <shogun/labels/RegressionLabels.h>

using namespace shogun;
using namespace std;

static vector<float64_t> make_vector(int32_t size, float64_t val)
{
    vector<float64_t> result(size);
    fill(result.begin(), result.end(), val);
    return result;
}

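// Givens plane rotation: fill G with the 2x2 orthogonal matrix that maps
// [x0; x1] to [r; 0] with r = sqrt(x0^2 + x1^2), and return the rotated
// pair in (y0, y1). Used by cholesky_delete to restore triangularity.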
static void plane_rot(float64_t x0, float64_t x1,
    float64_t &y0, float64_t &y1, SGMatrix<float64_t> &G)
{
    G.zero();
    if (x1 == 0)
    {
        G(0, 0) = G(1, 1) = 1;
        y0 = x0;
        y1 = x1;
    }
    else
    {
        float64_t r = CMath::sqrt(x0*x0+x1*x1);
        float64_t sx0 = x0 / r;
        float64_t sx1 = x1 / r;

        G(0,0) = sx0;
        G(1,0) = -sx1;
        G(0,1) = sx1;
        G(1,1) = sx0;

        y0 = r;
        y1 = 0;
    }
}

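// Locate the entry with the largest absolute value among the entries not
// flagged in ignore; imax stays -1 when every entry is ignored.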
static void find_max_abs(const vector<float64_t> &vec, const vector<bool> &ignore,
    int32_t &imax, float64_t &vmax)
{
    imax = -1;
    vmax = -1;
    for (uint32_t i=0; i < vec.size(); ++i)
    {
        if (ignore[i])
            continue;
        if (CMath::abs(vec[i]) > vmax)
        {
            vmax = CMath::abs(vec[i]);
            imax = i;
        }
    }
}

CLeastAngleRegression::CLeastAngleRegression(bool lasso) :
    CLinearMachine(), m_lasso(lasso),
    m_max_nonz(0), m_max_l1_norm(0)
{
    SG_ADD(&m_max_nonz, "max_nonz", "Max number of non-zero variables", MS_AVAILABLE);
    SG_ADD(&m_max_l1_norm, "max_l1_norm", "Max l1-norm of estimator", MS_AVAILABLE);
}

CLeastAngleRegression::~CLeastAngleRegression()
{
}

bool CLeastAngleRegression::train_machine(CFeatures* data)
{
    if (!m_labels)
        SG_ERROR("No labels set\n");
    if (m_labels->get_label_type() != LT_REGRESSION)
        SG_ERROR("Expected RegressionLabels\n");

    if (!data)
        data=features;

    if (!data)
        SG_ERROR("No features set\n");

    if (m_labels->get_num_labels() != data->get_num_vectors())
        SG_ERROR("Number of training vectors does not match number of labels\n");

    if (data->get_feature_class() != C_DENSE)
        SG_ERROR("Expected Simple Features\n");

    if (data->get_feature_type() != F_DREAL)
        SG_ERROR("Expected Real Features\n");

    CDenseFeatures<float64_t>* feats = (CDenseFeatures<float64_t>*) data;
    int32_t n_fea = feats->get_num_features();
    int32_t n_vec = feats->get_num_vectors();

    bool lasso_cond = false;
    bool stop_cond = false;

    // init facilities
    m_beta_idx.clear();
    m_beta_path.clear();
    m_num_active = 0;
    m_active_set.clear();
    m_is_active.resize(n_fea);
    fill(m_is_active.begin(), m_is_active.end(), false);

    SGVector<float64_t> y = ((CRegressionLabels*) m_labels)->get_labels();
    SGMatrix<float64_t> Xorig = feats->get_feature_matrix();

    // transpose(X) is more convenient to work with since we care
    // about features here. After transpose, each row will be a data
    // point while each column corresponds to a feature
    SGMatrix<float64_t> X(n_vec, n_fea, true);
    for (int32_t i=0; i < n_vec; ++i)
    {
        for (int32_t j=0; j < n_fea; ++j)
            X(i,j) = Xorig(j,i);
    }

    // beta is the estimator
    vector<float64_t> beta = make_vector(n_fea, 0);

    vector<float64_t> Xy = make_vector(n_fea, 0);
    // Xy = X' * y
    cblas_dgemv(CblasColMajor, CblasTrans, n_vec, n_fea, 1, X.matrix, n_vec,
        y.vector, 1, 0, &Xy[0], 1);

    // mu is the prediction
    vector<float64_t> mu = make_vector(n_vec, 0);

    // correlation
    vector<float64_t> corr = make_vector(n_fea, 0);
    // sign of correlation
    vector<float64_t> corr_sign(n_fea);

    // Cholesky factorization R'R = X'X, R is upper triangular
    SGMatrix<float64_t> R;

    float64_t max_corr = 1;
    int32_t i_max_corr = 1;

    // first entry: all coefficients are zero
    m_beta_path.push_back(beta);
    m_beta_idx.push_back(0);

    //========================================
    // main loop
    //========================================
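    // each iteration follows Efron et al., "Least Angle Regression" (2004):
    // compute correlations with the current residual, activate the most
    // correlated inactive feature, then step along the equiangular direction
    // of the active set until a new feature ties in correlation or (with the
    // lasso modification) an active coefficient would cross zero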
    int32_t nloop=0;
    while (m_num_active < n_fea && max_corr > CMath::MACHINE_EPSILON && !stop_cond)
    {
        // corr = X' * (y-mu) = - X'*mu + Xy
        copy(Xy.begin(), Xy.end(), corr.begin());
        cblas_dgemv(CblasColMajor, CblasTrans, n_vec, n_fea, -1,
            X.matrix, n_vec, &mu[0], 1, 1, &corr[0], 1);

        // corr_sign = sign(corr)
        for (uint32_t i=0; i < corr.size(); ++i)
            corr_sign[i] = CMath::sign(corr[i]);

        // find max absolute correlation in inactive set
        find_max_abs(corr, m_is_active, i_max_corr, max_corr);

        if (!lasso_cond)
        {
            // update Cholesky factorization matrix
            R=cholesky_insert(X, R, i_max_corr);
            activate_variable(i_max_corr);
        }

        // corr_sign_a = corr_sign[m_active_set]
        vector<float64_t> corr_sign_a(m_num_active);
        for (int32_t i=0; i < m_num_active; ++i)
            corr_sign_a[i] = corr_sign[m_active_set[i]];

        // GA1 = R\(R'\corr_sign_a)
        vector<float64_t> GA1(corr_sign_a);
        cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit,
            m_num_active, 1, 1, R.matrix, m_num_active, &GA1[0], m_num_active);
        cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit,
            m_num_active, 1, 1, R.matrix, m_num_active, &GA1[0], m_num_active);

        // AA = 1/sqrt(GA1' * corr_sign_a)
        float64_t AA = cblas_ddot(m_num_active, &GA1[0], 1, &corr_sign_a[0], 1);
        AA = 1/CMath::sqrt(AA);

        // wA = AA*GA1
        vector<float64_t> wA(GA1);
        for (int32_t i=0; i < m_num_active; ++i)
            wA[i] *= AA;

        // equiangular direction (unit vector)
        vector<float64_t> u = make_vector(n_vec, 0);
        // u = X[:,m_active_set] * wA
        for (int32_t i=0; i < m_num_active; ++i)
        {
            // u += wA[i] * X[:,m_active_set[i]]
            cblas_daxpy(n_vec, wA[i],
                X.get_column_vector(m_active_set[i]), 1, &u[0], 1);
        }

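        // LARS step length: the smallest positive value of
        // (C - c_j)/(A - a_j) and (C + c_j)/(A + a_j) over inactive
        // features j, where C = max_corr, A = AA and a_j = X[:,j]'u
        // (Efron et al., 2004)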
        // step size
        float64_t gamma = max_corr / AA;
        if (m_num_active < n_fea)
        {
            for (int32_t i=0; i < n_fea; ++i)
            {
                if (m_is_active[i])
                    continue;

                // correlation between X[:,i] and u
                float64_t dir_corr = cblas_ddot(n_vec,
                    X.get_column_vector(i), 1, &u[0], 1);

                float64_t tmp1 = (max_corr-corr[i])/(AA-dir_corr);
                float64_t tmp2 = (max_corr+corr[i])/(AA+dir_corr);
                if (tmp1 > CMath::MACHINE_EPSILON && tmp1 < gamma)
                    gamma = tmp1;
                if (tmp2 > CMath::MACHINE_EPSILON && tmp2 < gamma)
                    gamma = tmp2;
            }
        }

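        // lasso modification: an active coefficient may not change sign
        // within a step; -beta[j]/wA[j] is where coefficient j would hit
        // zero, and the smallest positive such point caps gamma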
        int32_t i_kick=-1;
        int32_t i_change=i_max_corr;
        if (m_lasso)
        {
            // lasso modification to further refine gamma
            lasso_cond = false;
            float64_t lasso_bound = numeric_limits<float64_t>::max();

            for (int32_t i=0; i < m_num_active; ++i)
            {
                float64_t tmp = -beta[m_active_set[i]] / wA[i];
                if (tmp > CMath::MACHINE_EPSILON && tmp < lasso_bound)
                {
                    lasso_bound = tmp;
                    i_kick = i;
                }
            }

            if (lasso_bound < gamma)
            {
                gamma = lasso_bound;
                lasso_cond = true;
                i_change = m_active_set[i_kick];
            }
        }

        // update prediction: mu = mu + gamma * u
        cblas_daxpy(n_vec, gamma, &u[0], 1, &mu[0], 1);

        // update estimator
        for (int32_t i=0; i < m_num_active; ++i)
            beta[m_active_set[i]] += gamma * wA[i];

        // early stopping on max l1-norm
        if (m_max_l1_norm > 0)
        {
            float64_t l1 = SGVector<float64_t>::onenorm(&beta[0], n_fea);
            if (l1 > m_max_l1_norm)
            {
                // stopping with interpolated beta
                stop_cond = true;
                lasso_cond = false;
                float64_t l1_prev = SGVector<float64_t>::onenorm(&m_beta_path[nloop][0], n_fea);
                float64_t s = (m_max_l1_norm-l1_prev)/(l1-l1_prev);

                // beta = beta_prev + s*(beta-beta_prev)
                //      = (1-s)*beta_prev + s*beta
                cblas_dscal(n_fea, s, &beta[0], 1);
                cblas_daxpy(n_fea, 1-s, &m_beta_path[nloop][0], 1, &beta[0], 1);
            }
        }

        // if lasso cond, drop the variable from active set
        if (lasso_cond)
        {
            beta[i_change] = 0; // ensure it is exactly zero

            // update Cholesky factorization
            R=cholesky_delete(R, i_kick);
            deactivate_variable(i_kick);
        }

        nloop++;
        m_beta_path.push_back(beta);
        if (size_t(m_num_active) >= m_beta_idx.size())
            m_beta_idx.push_back(nloop);
        else
            m_beta_idx[m_num_active] = nloop;

        // early stopping with max number of non-zero variables
        if (m_max_nonz > 0 && m_num_active >= m_max_nonz)
            stop_cond = true;

    } // main loop

    // assign default estimator
    w.vlen = n_fea;
    switch_w(m_beta_idx.size()-1);

    return true;
}

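// Grow the Cholesky factor R of the active set's Gram matrix by one
// variable: solve R' * R_k = (X'X)_k for the new column and set the new
// diagonal entry to sqrt(diag_k - R_k'R_k).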
SGMatrix<float64_t> CLeastAngleRegression::cholesky_insert(
    SGMatrix<float64_t> X, SGMatrix<float64_t> R, int32_t i_max_corr)
{
    // diag_k = X[:,i_max_corr]' * X[:,i_max_corr]
    float64_t diag_k = cblas_ddot(X.num_rows, X.get_column_vector(i_max_corr), 1,
        X.get_column_vector(i_max_corr), 1);

    if (m_num_active == 0)
    { // R isn't allocated yet
        SGMatrix<float64_t> nR(1,1);
        nR(0,0) = CMath::sqrt(diag_k);
        return nR;
    }
    else
    {
        // col_k is the k-th column of (X'X)
        vector<float64_t> col_k(m_num_active);
        for (int32_t i=0; i < m_num_active; ++i)
        {
            // col_k[i] = X[:,i_max_corr]' * X[:,m_active_set[i]]
            col_k[i] = cblas_ddot(X.num_rows, X.get_column_vector(i_max_corr), 1,
                X.get_column_vector(m_active_set[i]), 1);
        }

        // R' * R_k = (X' * X)_k = col_k, solving to get R_k
        vector<float64_t> R_k(col_k);
        cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, m_num_active, 1,
            1, R.matrix, m_num_active, &R_k[0], m_num_active);

        float64_t R_kk = CMath::sqrt(diag_k -
            cblas_ddot(m_num_active, &R_k[0], 1, &R_k[0], 1));

        // new_R = [R R_k; zeros(...) R_kk]
        SGMatrix<float64_t> nR(m_num_active+1, m_num_active+1);
        for (int32_t i=0; i < m_num_active; ++i)
            for (int32_t j=0; j < m_num_active; ++j)
                nR(i,j) = R(i,j);
        for (int32_t i=0; i < m_num_active; ++i)
            nR(i, m_num_active) = R_k[i];
        for (int32_t i=0; i < m_num_active; ++i)
            nR(m_num_active, i) = 0;
        nR(m_num_active, m_num_active) = R_kk;

        return nR;
    }
}

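// Shrink the Cholesky factor after dropping the i_kick-th active
// variable: remove its column, then apply Givens rotations to zero the
// subdiagonal entries that the column shift leaves behind.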
SGMatrix<float64_t> CLeastAngleRegression::cholesky_delete(SGMatrix<float64_t> R, int32_t i_kick)
{
    if (i_kick != m_num_active-1)
    {
        // remove i_kick-th column
        for (int32_t j=i_kick; j < m_num_active-1; ++j)
            for (int32_t i=0; i < m_num_active; ++i)
                R(i,j) = R(i,j+1);

        SGMatrix<float64_t> G(2,2);
        for (int32_t i=i_kick; i < m_num_active-1; ++i)
        {
            plane_rot(R(i,i), R(i+1,i), R(i,i), R(i+1,i), G);
            if (i < m_num_active-2)
            {
                for (int32_t k=i+1; k < m_num_active-1; ++k)
                {
                    // R[i:i+1, k] = G*R[i:i+1, k]
                    float64_t Rik = R(i,k), Ri1k = R(i+1,k);
                    R(i,k) = G(0,0)*Rik + G(0,1)*Ri1k;
                    R(i+1,k) = G(1,0)*Rik + G(1,1)*Ri1k;
                }
            }
        }
    }

    SGMatrix<float64_t> nR(m_num_active-1, m_num_active-1);
    for (int32_t i=0; i < m_num_active-1; ++i)
        for (int32_t j=0; j < m_num_active-1; ++j)
            nR(i,j) = R(i,j);

    return nR;
}

#endif // HAVE_LAPACK
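
Below is a minimal usage sketch, not part of the file above. It assumes the Shogun 2.0 API (init_shogun_with_defaults, CDenseFeatures, CRegressionLabels, CMachine::train/apply_regression, and the set_max_l1_norm setter from LeastAngleRegression.h); the toy data is purely illustrative, and since train_machine does not center the data, features and labels should be made zero-mean first for the algorithm to match the textbook LARS setting.

// usage_sketch.cpp -- hypothetical example, not part of the file above
#include <shogun/base/init.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/RegressionLabels.h>
#include <shogun/regression/LeastAngleRegression.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // toy problem: 3 features, 4 examples (one example per column);
    // placeholder values, ideally centered to zero mean before training
    SGMatrix<float64_t> X(3, 4);
    SGVector<float64_t> y(4);
    for (int32_t j=0; j < 4; ++j)
    {
        for (int32_t i=0; i < 3; ++i)
            X(i,j) = (i+1)*(j+1) % 5;
        y[j] = j - 1.5; // zero-mean labels
    }

    CDenseFeatures<float64_t>* feats = new CDenseFeatures<float64_t>(X);
    CRegressionLabels* labels = new CRegressionLabels(y);

    // lasso=true runs the LASSO variant; false runs plain LARS
    CLeastAngleRegression* lars = new CLeastAngleRegression(true);
    lars->set_labels(labels);
    lars->set_max_l1_norm(2.0); // stop early once |beta|_1 exceeds 2
    lars->train(feats);

    CRegressionLabels* out = lars->apply_regression(feats);

    SG_UNREF(out);
    SG_UNREF(lars);
    SG_UNREF(feats);
    exit_shogun();
    return 0;
}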
