21 using namespace Eigen;
31 const slep_options& options)
44 MatrixXd w = MatrixXd::Zero(n_feats, n_classes);
46 VectorXd c = VectorXd::Zero(n_classes);
48 if (options.last_result)
52 for (i=0; i<n_classes; i++)
55 for (j=0; j<n_feats; j++)
60 MatrixXd wp = w, wwp = MatrixXd::Zero(n_feats, n_classes);
61 VectorXd cp = c, ccp = VectorXd::Zero(n_classes);
63 MatrixXd search_w = MatrixXd::Zero(n_feats, n_classes);
65 VectorXd search_c = VectorXd::Zero(n_classes);
67 MatrixXd Aw = MatrixXd::Zero(n_vecs, n_classes);
68 for (j=0; j<n_classes; j++)
69 features->
dense_dot_range(Aw.col(j).data(), 0, n_vecs, NULL, w.col(j).data(), n_feats, 0.0);
70 MatrixXd As = MatrixXd::Zero(n_vecs, n_classes);
71 MatrixXd Awp = MatrixXd::Zero(n_vecs, n_classes);
73 MatrixXd g = MatrixXd::Zero(n_feats, n_classes);
74 VectorXd gc = VectorXd::Zero(n_classes);
76 MatrixXd v = MatrixXd::Zero(n_feats, n_classes);
79 double L = 1.0/(n_vecs*n_classes);
81 double alphap = 0, alpha = 1;
86 double objective = 0.0;
87 double objective_p = 0.0;
95 double beta = (alphap-1)/alpha;
97 search_w = w + beta*wwp;
98 search_c = c + beta*ccp;
101 As = Aw + beta*(Aw-Awp);
108 for (i=0; i<n_vecs; i++)
111 int vec_class = labels_vector[i];
113 for (j=0; j<n_classes; j++)
116 double aa = ((vec_class == j) ? -1.0 : 1.0)*(As(i,j) + search_c(j));
117 double bb = aa > 0.0 ? aa : 0.0;
121 double b = ((vec_class == j) ? -1.0 : 1.0)*(1-prob);
138 while (inner_iter<5000)
145 eppMatrix(w.data(),v.data(),n_feats,n_classes,lambda/L,options.q);
150 for (j=0; j<n_classes; j++)
151 features->
dense_dot_range(Aw.col(j).data(), 0, n_vecs, NULL, w.col(j).data(), n_feats, 0.0);
155 for (i=0; i<n_vecs; i++)
157 int vec_class = labels_vector[i];
158 for (j=0; j<n_classes; j++)
160 double aa = ((vec_class == j) ? -1.0 : 1.0)*(Aw(i,j) + c(j));
161 double bb = aa > 0.0 ? aa : 0.0;
168 double r_sum = (v.squaredNorm() + (c-search_c).squaredNorm())/2;
169 double l_sum = fun_x - fun_s - v.cwiseProduct(g).sum() - (c-search_c).
dot(gc);
174 SG_SINFO(
"Gradient step makes little improvement (%f)\n",r_sum)
179 if (l_sum <= r_sum*L)
196 objective_p = objective;
200 double L1q_norm = 0.0;
201 for (
int m=0; m<n_classes; m++)
202 L1q_norm += w.col(m).norm();
203 objective += lambda*L1q_norm;
208 if ((
CMath::abs(objective - objective_p) < options.tolerance*
CMath::abs(objective_p)) && (iter>2))
210 SG_SINFO(
"Objective changes less than tolerance\n")
216 SG_SINFO(
"%d iterations passed, objective = %f\n",iter,objective)
221 for (j=0; j<n_classes; j++)
223 for (i=0; i<n_feats; i++)
228 for (j=0; j<n_classes; j++)
230 return slep_result_t(r_w, r_c);
Class Time implements a stopwatch based on either CPU time or wall clock time.
virtual void dense_dot_range(float64_t *output, int32_t start, int32_t stop, float64_t *alphas, float64_t *vec, int32_t dim, float64_t b)
Vector::Scalar dot(Vector a, Vector b)
virtual int32_t get_num_vectors() const =0
virtual void add_to_dense_vec(float64_t alpha, int32_t vec_idx1, float64_t *vec2, int32_t vec2_len, bool abs_val=false)=0
slep_result_t slep_mc_plain_lr(CDotFeatures *features, CMulticlassLabels *labels, float64_t z, const slep_options &options)
Features that support dot products among other operations.
int32_t get_num_classes()
virtual int32_t get_dim_feature_space() const =0
SGVector< float64_t > get_labels()
Multiclass Labels for multi-class classification.
void eppMatrix(double *X, double *V, int k, int n, double rho, double p)
static bool cancel_computations()
All classes and functions are contained in the shogun namespace.
static float64_t exp(float64_t x)
static float64_t log(float64_t v)
static float32_t sqrt(float32_t x)