// body of make_vector(size, val): allocate a vector and fill it with val
vector<float64_t> result(size);
fill(result.begin(), result.end(), val);
// in plane_rot(): degenerate case, the Givens rotation G is the identity
G(0, 0) = G(1, 1) = 1;
// find_max_abs(): index and value of the largest |vec[i]|, skipping masked entries
static void find_max_abs(const vector<float64_t> &vec,
    const vector<bool> &ignore_mask, int32_t &imax, float64_t &vmax)
for (uint32_t i=0; i < vec.size(); ++i)
// constructor initializer list: by default no limit on non-zeros or on the l1-norm
m_max_nonz(0), m_max_l1_norm(0)
// input validation at the top of train_machine()
SG_ERROR("Expected RegressionLabels\n")
SG_ERROR("Number of training vectors does not match number of labels\n")
SG_ERROR("Expected Simple Features\n")
SG_ERROR("Expected Real Features\n")
bool lasso_cond = false;
bool stop_cond = false;

m_active_set.clear();
m_is_active.resize(n_fea);
fill(m_is_active.begin(), m_is_active.end(), false);
// build X as the transposed copy of the feature matrix: each row is a sample,
// each column a feature
for (int32_t i=0; i < n_vec; ++i)
    for (int32_t j=0; j < n_fea; ++j)
// Xy = X' * y
cblas_dgemv(CblasColMajor, CblasTrans, n_vec, n_fea, 1, X.matrix, n_vec,
    y.vector, 1, 0, &Xy[0], 1);
// sign of the correlation of each feature with the current residual
vector<float64_t> corr_sign(n_fea);
int32_t i_max_corr = 1;

// first vertex of the path: all coefficients are zero
m_beta_path.push_back(beta);
m_beta_idx.push_back(0);
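// m_beta_path stores every vertex of the piecewise-linear regularization path;
// m_beta_idx[k] is meant to point at the path entry that has k active variables.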
int32_t max_active_allowed = CMath::min(n_vec-1, n_fea);
// corr = X' * (y - mu) = Xy - X' * mu
copy(Xy.begin(), Xy.end(), corr.begin());
cblas_dgemv(CblasColMajor, CblasTrans, n_vec, n_fea, -1,
    X.matrix, n_vec, &mu[0], 1, 1, &corr[0], 1);
// record the sign of each correlation
for (uint32_t i=0; i < corr.size(); ++i)
    corr_sign[i] = CMath::sign(corr[i]);
// add the most correlated feature: update the Cholesky factor R of X_A' * X_A
R = cholesky_insert(X, R, i_max_corr);
activate_variable(i_max_corr);
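// Keeping R (with R' * R = X_A' * X_A) up to date via rank-one inserts/deletes means
// each iteration only needs O(k^2) triangular solves instead of refactorizing the
// k x k Gram matrix from scratch.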
vector<float64_t> corr_sign_a(m_num_active);
for (int32_t i=0; i < m_num_active; ++i)
    corr_sign_a[i] = corr_sign[m_active_set[i]];
// GA1 = (X_A' * X_A)^{-1} * s_A, via two triangular solves with the Cholesky factor R
vector<float64_t> GA1(corr_sign_a);
cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit,
    m_num_active, 1, 1, R.matrix, m_num_active, &GA1[0], m_num_active);
cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit,
    m_num_active, 1, 1, R.matrix, m_num_active, &GA1[0], m_num_active);
// AA = s_A' * (X_A' * X_A)^{-1} * s_A; the LARS constant is A_A = 1/sqrt(AA)
float64_t AA = cblas_ddot(m_num_active, &GA1[0], 1, &corr_sign_a[0], 1);
AA = 1/CMath::sqrt(AA);
// wA = A_A * GA1: weights of the equiangular direction over the active set
vector<float64_t> wA(GA1);
for (int32_t i=0; i < m_num_active; ++i)
    wA[i] *= AA;
// u = X_A * wA: unit equiangular vector in sample space
for (int32_t i=0; i < m_num_active; ++i)
    cblas_daxpy(n_vec, wA[i],
        X.get_column_vector(m_active_set[i]), 1, &u[0], 1);
// step size: the distance until some inactive feature becomes as correlated
// with the residual as the active set (dir_corr = x_i' * u)
if (m_num_active < n_fea)
    for (int32_t i=0; i < n_fea; ++i)
        float64_t tmp1 = (max_corr-corr[i])/(AA-dir_corr);
        float64_t tmp2 = (max_corr+corr[i])/(AA+dir_corr);
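// For reference (Efron, Hastie, Johnstone & Tibshirani, 2004): with C = max_corr,
// a_j = x_j' * u and A_A the normalization constant, the LARS step length is
//   gamma = min+ over inactive j of { (C - c_j)/(A_A - a_j), (C + c_j)/(A_A + a_j) },
// i.e. the smallest positive tmp1/tmp2 computed above.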
// LASSO modification: find the first active coefficient that would cross zero
int32_t i_change = i_max_corr;
for (int32_t i=0; i < m_num_active; ++i)
    float64_t tmp = -beta[m_active_set[i]] / wA[i];
if (lasso_bound < gamma)
    i_change = m_active_set[i_kick];
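// LASSO modification (Efron et al., 2004, sec. 3): the first active coefficient to
// cross zero does so at gamma~ = min+ over active i of { -beta_i / wA_i }; if
// gamma~ < gamma, the step stops there and that variable leaves the model.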
// take the step: mu += gamma * u and move the active coefficients along wA
cblas_daxpy(n_vec, gamma, &u[0], 1, &mu[0], 1);
for (int32_t i=0; i < m_num_active; ++i)
    beta[m_active_set[i]] += gamma * wA[i];
// early stopping on the l1 budget: if the step overshoots m_max_l1_norm,
// interpolate beta = (1-s) * beta_prev + s * beta back onto the constraint
if (m_max_l1_norm > 0)
    if (l1 > m_max_l1_norm)
        float64_t s = (m_max_l1_norm-l1_prev)/(l1-l1_prev);
        cblas_dscal(n_fea, s, &beta[0], 1);
        cblas_daxpy(n_fea, 1-s, &m_beta_path[nloop][0], 1, &beta[0], 1);
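// Assuming no coefficient changes sign during this step, ||beta||_1 is linear in s,
// so s = (m_max_l1_norm - l1_prev)/(l1 - l1_prev) puts the interpolated estimator
// exactly on the requested l1 budget.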
// LASSO drop: downdate the Cholesky factor and deactivate the variable
R = cholesky_delete(R, i_kick);
deactivate_variable(i_kick);
// record this vertex of the path
m_beta_path.push_back(beta);
if (size_t(m_num_active) >= m_beta_idx.size())
    m_beta_idx.push_back(nloop);
else
    m_beta_idx[m_num_active] = nloop;

// early stopping once the allowed number of non-zero variables is reached
if (m_max_nonz > 0 && m_num_active >= m_max_nonz)
    stop_cond = true;
// in cholesky_insert(): with an empty active set the factor is just the
// 1x1 matrix sqrt(x_k' * x_k)
if (m_num_active == 0)
// col_k[i] = x_k' * x_{active[i]}: the new column of the Gram matrix X_A' * X_A
vector<float64_t> col_k(m_num_active);
for (int32_t i=0; i < m_num_active; ++i)
// solve R' * R_k = col_k for the new column R_k of the factor
vector<float64_t> R_k(col_k);
cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, m_num_active, 1,
    1, R.matrix, m_num_active, &R_k[0], m_num_active);
// new diagonal entry, with diag_k = x_k' * x_k (computed earlier, not shown here)
float64_t R_kk = CMath::sqrt(diag_k -
    cblas_ddot(m_num_active, &R_k[0], 1, &R_k[0], 1));
// assemble the enlarged factor nR = [R, R_k; 0, R_kk]
for (int32_t i=0; i < m_num_active; ++i)
    for (int32_t j=0; j < m_num_active; ++j)
        nR(i,j) = R(i,j);
for (int32_t i=0; i < m_num_active; ++i)
    nR(i, m_num_active) = R_k[i];
for (int32_t i=0; i < m_num_active; ++i)
    nR(m_num_active, i) = 0;
nR(m_num_active, m_num_active) = R_kk;
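// Why this works: if R' * R = X_A' * X_A, then appending the column [R_k; R_kk] with
// R' * R_k = X_A' * x_k and R_kk^2 = x_k' * x_k - R_k' * R_k makes nR' * nR the Gram
// matrix of the enlarged active set, so nR is its Cholesky factor.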
// in cholesky_delete(): shift the columns to the right of i_kick one step left
if (i_kick != m_num_active-1)
    for (int32_t j=i_kick; j < m_num_active-1; ++j)
        for (int32_t i=0; i < m_num_active; ++i)
            R(i,j) = R(i,j+1);

// the shift leaves entries below the diagonal; Givens rotations (plane_rot)
// zero them out again, restoring the upper-triangular form
for (int32_t i=i_kick; i < m_num_active-1; ++i)
    plane_rot(R(i,i), R(i+1,i), R(i,i), R(i+1,i), G);
    if (i < m_num_active-2)
        for (int32_t k=i+1; k < m_num_active-1; ++k)
            // apply the same rotation to the rest of rows i and i+1
            float64_t Rik = R(i,k), Ri1k = R(i+1,k);
            R(i,k) = G(0,0)*Rik + G(0,1)*Ri1k;
            R(i+1,k) = G(1,0)*Rik + G(1,1)*Ri1k;

// copy the shrunken (m_num_active-1) x (m_num_active-1) factor out
for (int32_t i=0; i < m_num_active-1; ++i)
    for (int32_t j=0; j < m_num_active-1; ++j)
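// The Givens rotations are orthogonal, so R' * R (the Gram matrix of the remaining
// active variables) is unchanged while the factor regains its upper-triangular shape.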
#endif // HAVE_LAPACK