22 using namespace Eigen;
28 slep_result_t slep_mc_plain_lr(
32 const slep_options& options)
// NOTE(review): this is a DISCONTIGUOUS extract of the original source file.
// The leading integer on each line is the original file's line number; many
// intermediate lines are missing.  The comments below describe only what the
// visible fragments show and must be re-verified against the full file.
//
// Visible structure: an accelerated proximal-gradient (FISTA-style) solver for
// multiclass logistic regression with a mixed-norm penalty (lambda * sum of
// per-class column norms of w), as used in the SLEP package.  It learns a
// weight matrix w (n_feats x n_classes) and a per-class bias vector c, and
// returns them packed into a slep_result_t.
// Initialize weights and biases to zero.
45 MatrixXd w = MatrixXd::Zero(n_feats, n_classes);
47 VectorXd c = VectorXd::Zero(n_classes);
// Warm start: presumably copies a previously computed w (and c) out of
// options.last_result — loop body lines are missing here, TODO confirm.
49 if (options.last_result)
53 for (i=0; i<n_classes; i++)
56 for (j=0; j<n_feats; j++)
// wp/cp hold the previous iterate; wwp/ccp hold the iterate difference used
// for the momentum (extrapolation) step below.
61 MatrixXd wp = w, wwp = MatrixXd::Zero(n_feats, n_classes);
62 VectorXd cp = c, ccp = VectorXd::Zero(n_classes);
// Extrapolated search point s = (search_w, search_c).
64 MatrixXd search_w = MatrixXd::Zero(n_feats, n_classes);
66 VectorXd search_c = VectorXd::Zero(n_classes);
// Aw(:,j) = feature matrix applied to w's j-th column, computed column-wise
// via the dot-feature interface (dense_dot_range over all n_vecs vectors).
68 MatrixXd Aw = MatrixXd::Zero(n_vecs, n_classes);
69 for (j=0; j<n_classes; j++)
70 features->
dense_dot_range(Aw.col(j).data(), 0, n_vecs, NULL, w.col(j).data(), n_feats, 0.0);
// As = products at the search point; Awp = products at the previous iterate,
// kept so As can be formed by extrapolation without recomputing dot products.
71 MatrixXd As = MatrixXd::Zero(n_vecs, n_classes);
72 MatrixXd Awp = MatrixXd::Zero(n_vecs, n_classes);
// Gradient of the smooth (logistic) loss w.r.t. w (g) and c (gc).
74 MatrixXd g = MatrixXd::Zero(n_feats, n_classes);
75 VectorXd gc = VectorXd::Zero(n_classes);
// v: step difference used in the line-search test (looks like w - search_w
// after the proximal step — TODO confirm against the missing lines).
77 MatrixXd v = MatrixXd::Zero(n_feats, n_classes);
// Initial estimate of the Lipschitz constant L, grown during line search.
80 double L = 1.0/(n_vecs*n_classes);
// FISTA momentum coefficients (alphap = previous alpha).
82 double alphap = 0, alpha = 1;
87 double objective = 0.0;
88 double objective_p = 0.0;
// Momentum/extrapolation step: s = w + beta*(w - wp), with the matching
// update of the precomputed products As = Aw + beta*(Aw - Awp).
96 double beta = (alphap-1)/alpha;
98 search_w = w + beta*wwp;
99 search_c = c + beta*ccp;
102 As = Aw + beta*(Aw-Awp);
// Gradient of the multiclass logistic loss at the search point, one
// (vector, class) pair at a time.
109 for (i=0; i<n_vecs; i++)
112 int vec_class = labels_vector[i];
114 for (j=0; j<n_classes; j++)
// aa = signed margin: sign is -1 for the true class, +1 otherwise.
117 double aa = ((vec_class == j) ? -1.0 : 1.0)*(As(i,j) + search_c(j));
// bb = max(aa, 0): standard shift for numerically stable log(1+exp(aa)).
118 double bb = aa > 0.0 ? aa : 0.0;
// b = per-sample gradient factor (missing lines presumably accumulate it
// into g/gc and the loss value — TODO confirm).
122 double b = ((vec_class == j) ? -1.0 : 1.0)*(1-prob);
// Inner line-search loop over the step size 1/L (capped at 5000 tries).
139 while (inner_iter<5000)
// Proximal step: eppMatrix is SLEP's Euclidean projection / proximal
// operator for the Lq-norm penalty with threshold lambda/L.
146 eppMatrix(w.data(),v.data(),n_feats,n_classes,lambda/L,options.q);
// Recompute Aw for the candidate w, column by column.
151 for (j=0; j<n_classes; j++)
152 features->
dense_dot_range(Aw.col(j).data(), 0, n_vecs, NULL, w.col(j).data(), n_feats, 0.0);
// Evaluate the smooth loss at the candidate point (same stable-margin
// scheme as above).
156 for (i=0; i<n_vecs; i++)
158 int vec_class = labels_vector[i];
159 for (j=0; j<n_classes; j++)
161 double aa = ((vec_class == j) ? -1.0 : 1.0)*(Aw(i,j) + c(j));
162 double bb = aa > 0.0 ? aa : 0.0;
// Line-search quantities: r_sum = ||step||^2 / 2,
// l_sum = f(x) - f(s) - <step, grad f(s)>.
169 double r_sum = (v.squaredNorm() + (c-search_c).squaredNorm())/2;
170 double l_sum = fun_x - fun_s - v.cwiseProduct(g).sum() - (c-search_c).
dot(gc);
// A vanishing step means the gradient step no longer improves the point.
175 SG_SINFO(
"Gradient step makes little improvement (%f)\n",r_sum)
// Sufficient-decrease test: accept the step if l_sum <= r_sum*L
// (otherwise L is presumably increased in the missing lines).
180 if (l_sum <= r_sum*L)
// FISTA momentum update: alpha_{k+1} = (1 + sqrt(4*alpha_k^2 + 1)) / 2.
190 alpha = (1+
CMath::sqrt(4*alpha*alpha+1))/2;
197 objective_p = objective;
// Penalty term: sum over classes of the norm of w's columns (L1/Lq
// mixed norm), weighted by lambda.
201 double L1q_norm = 0.0;
202 for (
int m=0; m<n_classes; m++)
203 L1q_norm += w.col(m).norm();
204 objective += lambda*L1q_norm;
// Stop when the relative objective change falls below options.tolerance
// (only after a couple of warm-up iterations).
209 if ((
CMath::abs(objective - objective_p) < options.tolerance*
CMath::abs(objective_p)) && (iter>2))
211 SG_SINFO(
"Objective changes less than tolerance\n")
217 SG_SINFO("%d iterations passed, objective = %f\n",iter,objective)
// Copy the solution into the result containers r_w / r_c and return.
222 for (j=0; j<n_classes; j++)
224 for (i=0; i<n_feats; i++)
229 for (j=0; j<n_classes; j++)
231 return slep_result_t(r_w, r_c);
235 #endif //USE_GPL_SHOGUN
Class Time implements a stopwatch based on either CPU time or wall-clock time.
virtual void dense_dot_range(float64_t *output, int32_t start, int32_t stop, float64_t *alphas, float64_t *vec, int32_t dim, float64_t b)
Vector::Scalar dot(Vector a, Vector b)
virtual int32_t get_num_vectors() const =0
virtual void add_to_dense_vec(float64_t alpha, int32_t vec_idx1, float64_t *vec2, int32_t vec2_len, bool abs_val=false)=0
Features that support dot products among other operations.
int32_t get_num_classes()
virtual int32_t get_dim_feature_space() const =0
SGVector< float64_t > get_labels()
Multiclass Labels for multi-class classification.
static bool cancel_computations()
All classes and functions are contained in the shogun namespace.
static float64_t exp(float64_t x)
static float64_t log(float64_t v)
Class which collects generic mathematical functions.
Matrix::Scalar max(Matrix m)