#ifdef USE_GPL_SHOGUN
#include <shogun/lib/slep/slep_mc_tree_lr.h>
#include <shogun/lib/slep/tree/altra.h>
#include <shogun/lib/slep/tree/general_altra.h>
#include <shogun/mathematics/Math.h>
#include <shogun/mathematics/eigen3.h>
#include <shogun/lib/Signal.h>

using namespace Eigen;

namespace shogun
{

slep_result_t slep_mc_tree_lr(
    CDotFeatures* features,
    CMulticlassLabels* labels,
    float64_t z,
    const slep_options& options)
{
    int i,j;
    // problem dimensions and labels
    int n_feats = features->get_dim_feature_space();
    int n_vecs = features->get_num_vectors();
    int n_classes = labels->get_num_classes();
    SGVector<float64_t> labels_vector = labels->get_labels();
    // weight matrix (one column per class) and intercepts
    MatrixXd w = MatrixXd::Zero(n_feats, n_classes);
    VectorXd c = VectorXd::Zero(n_classes);
    // warm-start from a previous result if one is provided
    if (options.last_result)
    {
        SGVector<float64_t> last_w = options.last_result->w;
        SGVector<float64_t> last_c = options.last_result->c;
        for (i=0; i<n_classes; i++)
        {
            c[i] = last_c[i];
            for (j=0; j<n_feats; j++)
                w(j,i) = last_w[j*n_classes+i];
        }
    }
    // previous iterate, iterate difference and search point
    MatrixXd wp = w, wwp = MatrixXd::Zero(n_feats, n_classes);
    VectorXd cp = c, ccp = VectorXd::Zero(n_classes);
    MatrixXd search_w = MatrixXd::Zero(n_feats, n_classes);
    VectorXd search_c = VectorXd::Zero(n_classes);
    // cached per-class dot products X*w
    MatrixXd Aw = MatrixXd::Zero(n_vecs, n_classes);
    for (j=0; j<n_classes; j++)
        features->dense_dot_range(Aw.col(j).data(), 0, n_vecs, NULL, w.col(j).data(), n_feats, 0.0);
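    // Note: CDotFeatures::dense_dot_range(output, start, stop, alphas, vec, dim, b)
    // fills output[k] with the dot product of example k and vec for k in
    // [start, stop), with optional per-example scaling alphas (NULL here)
    // and additive bias b (0.0 here). Aw therefore caches X*w column by
    // column, so the loss can be evaluated without another feature pass.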
    // dot products at the search point and at the previous iterate
    MatrixXd As = MatrixXd::Zero(n_vecs, n_classes);
    MatrixXd Awp = MatrixXd::Zero(n_vecs, n_classes);
    // gradients w.r.t. weights and intercepts
    MatrixXd g = MatrixXd::Zero(n_feats, n_classes);
    VectorXd gc = VectorXd::Zero(n_classes);
    // buffer for the proximal (projection) step
    MatrixXd v = MatrixXd::Zero(n_feats, n_classes);
    // initial Lipschitz estimate, refined by the line search below
    double L = 1.0/(n_vecs*n_classes);
    // momentum coefficients of the accelerated scheme
    double alphap = 0, alpha = 1;
    // regularization strength
    double lambda = z;
    // current and previous objective values
    double objective = 0.0;
    double objective_p = 0.0;

    int iter = 1;
    bool done = false;
    while ((!done) && (iter<options.max_iter) && (!CSignal::cancel_computations()))
    {
        double beta = (alphap-1)/alpha;
        // extrapolated search point and its dot products
        search_w = w + beta*wwp;
        search_c = c + beta*ccp;
        As = Aw + beta*(Aw-Awp);
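        // The lines above implement the accelerated (FISTA-style)
        // extrapolation: with beta_k = (alpha_{k-1}-1)/alpha_k, the search
        // point is s_k = x_k + beta_k*(x_k - x_{k-1}); since wwp = w - wp,
        // the cached products satisfy A*s_k = Aw + beta*(Aw - Awp), which
        // avoids a fresh pass over the feature matrix.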
        // objective value and gradient at the search point
        double fun_s = 0;
        g.setZero();
        gc.setZero();
        for (i=0; i<n_vecs; i++)
        {
            int vec_class = labels_vector[i];
            for (j=0; j<n_classes; j++)
            {
                double aa = ((vec_class == j) ? -1.0 : 1.0)*(As(i,j) + search_c(j));
                double bb = aa > 0.0 ? aa : 0.0;
                // numerically stable log(1+exp(aa))
                fun_s += CMath::log(CMath::exp(-bb) + CMath::exp(aa-bb)) + bb;
                double prob = 1.0/(1+CMath::exp(aa));
                double b = ((vec_class == j) ? -1.0 : 1.0)*(1-prob);
                gc[j] += b;
                features->add_to_dense_vec(b, i, g.col(j).data(), n_feats);
            }
        }
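        // Per term, with y = +1 if example i belongs to class j and -1
        // otherwise, and f = As(i,j) + search_c(j):
        //   aa = -y*f, loss = log(1+exp(aa)) = bb + log(exp(-bb)+exp(aa-bb)),
        // where the bb shift prevents overflow for large aa, and
        //   b = -y*(1-prob) = d(loss)/d(f)
        // is scattered into gc and g via add_to_dense_vec. Each class is
        // thus trained with a one-vs-rest logistic loss, summed over classes.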
        // keep the previous iterate before stepping
        wp = w;
        Awp = Aw;
        cp = c;

        int inner_iter = 0;
        double fun_x = 0;
        // line search over the Lipschitz estimate L
        while (inner_iter<5000)
        {
            // gradient step from the search point
            v = search_w - g/L;
            c = search_c - gc/L;
            // proximal step w.r.t. the tree-structured norm
            if (options.general)
                general_altra_mt(w.data(),v.data(),n_classes,n_feats,options.G,options.ind_t,options.n_nodes,lambda/L);
            else
                altra_mt(w.data(),v.data(),n_classes,n_feats,options.ind_t,options.n_nodes,lambda/L);
            v = w - search_w;
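            // altra_mt / general_altra_mt compute, for each class column,
            // the proximal operator of the tree-structured group-lasso
            // norm: w = argmin_u 0.5*||u - v||^2 + (lambda/L)*Omega_tree(u),
            // with the tree encoded by ind_t (node ranges and weights) and,
            // in the general case, by the extra index array G. This is the
            // Moreau-Yosida projection of the SLEP package (see altra.h);
            // v now holds the accepted step w - search_w for the test below.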
            // refresh cached dot products for the new w
            for (j=0; j<n_classes; j++)
                features->dense_dot_range(Aw.col(j).data(), 0, n_vecs, NULL, w.col(j).data(), n_feats, 0.0);
            // objective value at the candidate point
            fun_x = 0;
            for (i=0; i<n_vecs; i++)
            {
                int vec_class = labels_vector[i];
                for (j=0; j<n_classes; j++)
                {
                    double aa = ((vec_class == j) ? -1.0 : 1.0)*(Aw(i,j) + c(j));
                    double bb = aa > 0.0 ? aa : 0.0;
                    fun_x += CMath::log(CMath::exp(-bb) + CMath::exp(aa-bb)) + bb;
                }
            }
            double r_sum = (v.squaredNorm() + (c-search_c).squaredNorm())/2;
            double l_sum = fun_x - fun_s - v.cwiseProduct(g).sum() - (c-search_c).dot(gc);

            // stop if the proximal gradient step is negligible
            if (r_sum <= 1e-20)
            {
                SG_SINFO("Gradient step makes little improvement (%f)\n",r_sum)
                done = true;
                break;
            }
            // accept the step if the quadratic upper bound holds, else grow L
            if (l_sum <= r_sum*L)
                break;
            else
                L = CMath::max(2*L, l_sum/r_sum);

            inner_iter++;
        }
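        // The acceptance test above is the standard backtracking condition
        //   f(x) <= f(s) + <grad f(s), x - s> + (L/2)*||x - s||^2,
        // i.e. l_sum <= L*r_sum in the notation used here; whenever it
        // fails, L is at least doubled, so the inner loop terminates once
        // L majorizes the local curvature of the smooth loss.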
        // momentum coefficient update
        alphap = alpha;
        alpha = (1+CMath::sqrt(4*alpha*alpha+1))/2;
        // iterate differences for the next search point
        wwp = w - wp;
        ccp = c - cp;
        objective_p = objective;
        objective = fun_x;
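        // alpha follows the standard Nesterov/FISTA recurrence
        //   alpha_{k+1} = (1 + sqrt(1 + 4*alpha_k^2))/2,
        // which gives the accelerated O(1/k^2) objective convergence rate
        // for convex composite problems.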
        // value of the tree-structured regularizer
        double tree_norm = 0.0;
        if (options.general)
        {
            for (i=0; i<n_classes; i++)
                tree_norm += general_treeNorm(w.col(i).data(),n_classes,n_feats,options.G,options.ind_t,options.n_nodes);
        }
        else
        {
            for (i=0; i<n_classes; i++)
                tree_norm += treeNorm(w.col(i).data(),n_classes,n_feats,options.ind_t,options.n_nodes);
        }
        objective += lambda*tree_norm;
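        // The tracked objective is therefore the composite
        //   objective = sum_{i,j} log(1+exp(-y_ij*(x_i.w_j + c_j)))
        //             + lambda * sum_j Omega_tree(w_j),
        // the smooth multiclass logistic loss plus the tree-structured
        // regularizer evaluated on each class column.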
        // terminate once the relative objective change drops below tolerance
        if ((CMath::abs(objective - objective_p) < options.tolerance*CMath::abs(objective_p)) && (iter>2))
        {
            SG_SINFO("Objective changes less than tolerance\n")
            done = true;
        }

        iter++;
    }
    SG_SINFO("%d iterations passed, objective = %f\n",iter,objective)
    // copy the solution into Shogun containers
    SGMatrix<float64_t> r_w(n_feats,n_classes);
    for (j=0; j<n_classes; j++)
    {
        for (i=0; i<n_feats; i++)
            r_w(i,j) = w(i,j);
    }
    SGVector<float64_t> r_c(n_classes);
    for (j=0; j<n_classes; j++)
        r_c[j] = c[j];

    return slep_result_t(r_w, r_c);
}
} // namespace shogun
#endif //USE_GPL_SHOGUN
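// --------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original file): a minimal
// driver in the spirit of Shogun's CMulticlassTreeGuidedLogisticRegression.
// The feature/label setup, the tolerance value and the "3 entries per
// tree node" assumption for ind_t are ours; consult the SLEP
// documentation for the exact ind_t encoding.
// --------------------------------------------------------------------
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/MulticlassLabels.h>
#include <shogun/lib/slep/slep_mc_tree_lr.h>

using namespace shogun;

slep_result_t train_tree_lr(SGMatrix<float64_t> X, SGVector<float64_t> y,
    SGVector<float64_t> ind_t, double z)
{
    CDenseFeatures<float64_t>* features = new CDenseFeatures<float64_t>(X);
    CMulticlassLabels* labels = new CMulticlassLabels(y);

    slep_options options = slep_options::default_options();
    options.tolerance = 1e-4;         // relative objective tolerance
    options.general = false;          // plain tree solver (altra_mt)
    options.ind_t = ind_t.vector;     // tree encoding (see SLEP docs)
    options.n_nodes = ind_t.vlen/3;   // assumption: 3 entries per node

    // z is the regularization strength (lambda inside the solver)
    slep_result_t result = slep_mc_tree_lr(features, labels, z, options);

    SG_UNREF(features);
    SG_UNREF(labels);
    return result;
}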