#ifndef DOXYGEN_SHOULD_SKIP_THIS
l2r_lr_fun::l2r_lr_fun(const liblinear_problem *p, float64_t* Cs)
    z = SG_MALLOC(double, l);
    D = SG_MALLOC(double, l);

l2r_lr_fun::~l2r_lr_fun()
 
double l2r_lr_fun::fun(double *w)
        double yz = y[i]*z[i];
            f += C[i]*log(1 + exp(-yz));
            f += C[i]*(-yz+log(1 + exp(yz)));
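// fun() evaluates the L2-regularized logistic loss
//   f(w) = 0.5*w'w + sum_i C[i]*log(1 + exp(-y[i]*w'x[i])),
// with z = X*w precomputed via Xv(). The two branches above are the usual
// numerically stable split: for yz >= 0, log(1+exp(-yz)) is evaluated
// directly; for yz < 0 it is rewritten as -yz + log(1+exp(yz)) so that
// exp() never receives a large positive argument.
//
// Illustrative sketch of that split as a standalone helper (not part of
// this solver; assumes only <cmath>):
static inline double stable_logistic_loss_term(double yz)
{
    return (yz >= 0) ? log(1.0 + exp(-yz))
                     : -yz + log(1.0 + exp(yz));
}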
 
void l2r_lr_fun::grad(double *w, double *g)
    int w_size=get_nr_variable();
        z[i] = 1/(1 + exp(-y[i]*z[i]));
        z[i] = C[i]*(z[i]-1)*y[i];
    for(i=0;i<w_size;i++)
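// grad() computes the gradient of the same objective,
//   g = w + X' * d,   d[i] = C[i]*(sigma(y[i]*w'x[i]) - 1)*y[i],
// where sigma(t) = 1/(1+exp(-t)). The first assignment overwrites z[i] with
// sigma(y[i]*z[i]), the second turns it into d[i], XTv() accumulates X'*d
// into g, and the final loop over w_size adds the regularization term w.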
 
int l2r_lr_fun::get_nr_variable()
 
void l2r_lr_fun::Hv(double *s, double *Hs)
    int w_size=get_nr_variable();
    double *wa = SG_MALLOC(double, l);
        wa[i] = C[i]*D[i]*wa[i];
    for(i=0;i<w_size;i++)
        Hs[i] = s[i] + Hs[i];
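// Hv() is the Hessian-vector product needed by the Newton-type (TRON) solver:
//   H*s = s + X' * (C .* D .* (X*s)),
// where D[i] = sigma_i*(1 - sigma_i) is filled in while computing the
// gradient. wa holds X*s, gets scaled elementwise by C[i]*D[i], and is
// mapped back to weight space by XTv() before the identity term s is added.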
 
void l2r_lr_fun::Xv(double *v, double *res_Xv)
    if (m_prob->use_bias)
    m_prob->x->dense_dot_range(res_Xv, 0, l, NULL, v, n, bias);
 
void l2r_lr_fun::XTv(double *v, double *res_XTv)
    memset(res_XTv, 0, sizeof(double)*m_prob->n);
    if (m_prob->use_bias)
    for (int32_t i=0;i<l;i++)
        m_prob->x->add_to_dense_vec(v[i], i, res_XTv, n);
        if (m_prob->use_bias)
 
l2r_l2_svc_fun::l2r_l2_svc_fun(const liblinear_problem *p, double* Cs)
    z = SG_MALLOC(double, l);
    D = SG_MALLOC(double, l);
    I = SG_MALLOC(int, l);

l2r_l2_svc_fun::~l2r_l2_svc_fun()
 
double l2r_l2_svc_fun::fun(double *w)
    int w_size=get_nr_variable();
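// l2r_l2_svc_fun is the L2-regularized L2-loss (squared hinge) SVM objective,
//   f(w) = 0.5*w'w + sum_i C[i]*max(0, 1 - y[i]*w'x[i])^2.
// z caches X*w, and the indices of margin violators (y[i]*w'x[i] < 1) are
// collected in I so that grad() and Hv() only touch that subset.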
 
void l2r_l2_svc_fun::grad(double *w, double *g)
    int w_size=get_nr_variable();
            z[sizeI] = C[i]*y[i]*(z[i]-1);
    for(i=0;i<w_size;i++)
        g[i] = w[i] + 2*g[i];
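// Gradient of the squared hinge loss over the active set I:
//   g = w + 2 * sum_{i in I} C[i]*y[i]*(y[i]*w'x[i] - 1)*x[i],
// which is exactly the z[sizeI] assignment above followed by subXTv() and
// the final "g[i] = w[i] + 2*g[i]" loop.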
 
int l2r_l2_svc_fun::get_nr_variable()
 
void l2r_l2_svc_fun::Hv(double *s, double *Hs)
    int w_size=get_nr_variable();
    double *wa = SG_MALLOC(double, l);
        wa[i] = C[I[i]]*wa[i];
    for(i=0;i<w_size;i++)
        Hs[i] = s[i] + 2*Hs[i];
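// Generalized Hessian-vector product for the squared hinge loss, restricted
// to the margin violators in I:
//   H*s = s + 2 * X_I' * (C_I .* (X_I * s)).
// (The loss is not twice differentiable at the hinge, so this is the usual
// generalized Hessian.)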
 
void l2r_l2_svc_fun::Xv(double *v, double *res_Xv)
    if (m_prob->use_bias)
    m_prob->x->dense_dot_range(res_Xv, 0, l, NULL, v, n, bias);
 
void l2r_l2_svc_fun::subXv(double *v, double *res_Xv)
    if (m_prob->use_bias)
    m_prob->x->dense_dot_range_subset(I, sizeI, res_Xv, NULL, v, n, bias);
 
void l2r_l2_svc_fun::subXTv(double *v, double *XTv)
    if (m_prob->use_bias)
    memset(XTv, 0, sizeof(float64_t)*m_prob->n);
    for (int32_t i=0;i<sizeI;i++)
        m_prob->x->add_to_dense_vec(v[i], I[i], XTv, n);
        if (m_prob->use_bias)
 
l2r_l2_svr_fun::l2r_l2_svr_fun(const liblinear_problem *prob, double *Cs, double p):
    l2r_l2_svc_fun(prob, Cs)
 
double l2r_l2_svr_fun::fun(double *w)
    int w_size=get_nr_variable();
    for(i=0;i<w_size;i++)
            f += C[i]*(d+m_p)*(d+m_p);
            f += C[i]*(d-m_p)*(d-m_p);
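// l2r_l2_svr_fun is the L2-regularized epsilon-insensitive squared loss used
// for support vector regression,
//   f(w) = 0.5*w'w + sum_i C[i]*max(0, |w'x[i] - y[i]| - p)^2,
// with the tube width p stored in m_p and d = w'x[i] - y[i]. The two branches
// above handle d < -p and d > p; residuals inside the tube contribute nothing.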
 
void l2r_l2_svr_fun::grad(double *w, double *g)
    int w_size=get_nr_variable();
            z[sizeI] = C[i]*(d+m_p);
            z[sizeI] = C[i]*(d-m_p);
    for(i=0;i<w_size;i++)
        g[i] = w[i] + 2*g[i];
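// Its gradient keeps only the examples outside the tube (collected in I),
// with per-example coefficient C[i]*(d_i + p) when d_i < -p and C[i]*(d_i - p)
// when d_i > p, so that g = w + 2 * X_I' * z_I as in the final loop above.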
 
#define GETI(i) (prob->y[i])
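// GETI(i) returns the class label of example i, so C[int32_t(GETI(i))] looks
// up the per-class (label-dependent) regularization weight passed to the
// solver as weighted_C.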
Solver_MCSVM_CS::Solver_MCSVM_CS(const liblinear_problem *p, int n_class,
                                 double *weighted_C, double *w0_reg,
                                 double epsilon, int max_it, double max_time,
                                 mcsvm_state* given_state)
    this->nr_class = n_class;
    this->max_iter = max_it;
    this->C = weighted_C;
    this->max_train_time = max_time;
    this->state = given_state;
 
Solver_MCSVM_CS::~Solver_MCSVM_CS()
 
int compare_double(const void *a, const void *b)
    if(*(double *)a > *(double *)b)
    if(*(double *)a < *(double *)b)
 
void Solver_MCSVM_CS::solve_sub_problem(double A_i, int yi, double C_yi, int active_i, double *alpha_new)
    qsort(D, active_i, sizeof(double), compare_double);
    double beta = D[0] - A_i*C_yi;
    for(r=1;r<active_i && beta<r*D[r];r++)
    for(r=0;r<active_i;r++)
            alpha_new[r] = CMath::min(C_yi, (beta-state->B[r])/A_i);
            alpha_new[r] = CMath::min((double)0, (beta - state->B[r])/A_i);
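// solve_sub_problem() solves the per-example block of the Crammer-Singer
// multi-class SVM dual in closed form (cf. LIBLINEAR's sequential dual
// method): over the active classes, minimize
//   0.5*A_i*sum_m alpha_m^2 + sum_m B[m]*alpha_m
// subject to sum_m alpha_m = 0 and alpha_m <= C_yi*delta(m, yi).
// D is a copy of B with A_i*C_yi added at position yi; sorting it in
// decreasing order (compare_double) lets the scan over r determine beta, the
// multiplier of the equality constraint, after which each alpha_new[r] is
// clipped to its upper bound in the two branches above.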
 
bool Solver_MCSVM_CS::be_shrunk(int i, int m, int yi, double alpha_i, double minG)
        bound = C[int32_t(GETI(i))];
    if(alpha_i == bound && state->G[m] < minG)
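// be_shrunk() is the shrinking test: class m is removed from example i's
// active set once its dual variable sits at its upper bound (C[GETI(i)] for
// the true class yi, 0 for the other classes) and its gradient G[m] already
// lies below the current minimum minG, so it is unlikely to move soon.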
 
void Solver_MCSVM_CS::solve()
    double *w,*B,*G,*alpha,*alpha_new,*QD,*d_val;
    int *index,*d_ind,*alpha_index,*y_index,*active_size_i;
    if (!state->allocated)
        state->w = SG_CALLOC(double, nr_class*w_size);
        state->B = SG_CALLOC(double, nr_class);
        state->G = SG_CALLOC(double, nr_class);
        state->alpha = SG_CALLOC(double, l*nr_class);
        state->alpha_new = SG_CALLOC(double, nr_class);
        state->index = SG_CALLOC(int, l);
        state->QD = SG_CALLOC(double, l);
        state->d_ind = SG_CALLOC(int, nr_class);
        state->d_val = SG_CALLOC(double, nr_class);
        state->alpha_index = SG_CALLOC(int, nr_class*l);
        state->y_index = SG_CALLOC(int, l);
        state->active_size_i = SG_CALLOC(int, l);
        state->allocated = true;
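// All solver state lives in the caller-supplied mcsvm_state, so a partially
// finished optimization can be picked up again (see the max_train_time check
// below). Layout: w stores nr_class weights per feature (w[k*nr_class + m]),
// alpha stores nr_class dual variables per example (alpha[i*nr_class + m]);
// B, G, alpha_new, d_ind and d_val are per-class scratch buffers reused
// inside the loop over examples.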
 
    alpha = state->alpha;
    alpha_new = state->alpha_new;
    index = state->index;
    d_ind = state->d_ind;
    d_val = state->d_val;
    alpha_index = state->alpha_index;
    y_index = state->y_index;
    active_size_i = state->active_size_i;
    double* tx = SG_MALLOC(double, w_size);
    int dim = prob->x->get_dim_feature_space();
    double eps_shrink = CMath::max(10.0*eps, 1.0);
    bool start_from_all = true;
 
            for(m=0;m<nr_class;m++)
                alpha_index[i*nr_class+m] = m;
            QD[i] = prob->x->dot(i, prob->x,i);
            active_size_i[i] = nr_class;
            y_index[i] = prob->y[i];
        state->inited = true;
 
        for(i=0;i<active_size;i++)
        for(s=0;s<active_size;s++)
            double *alpha_i = &alpha[i*nr_class];
            int *alpha_index_i = &alpha_index[i*nr_class];
                for(m=0;m<active_size_i[i];m++)
                if(y_index[i] < active_size_i[i])
                memset(tx,0,dim*sizeof(double));
                prob->x->add_to_dense_vec(1.0,i,tx,dim);
                for (k=0; k<dim; k++)
                    double* w_i = &w[k*nr_class];
                    for (m=0; m<active_size_i[i]; m++)
                        G[m] += w_i[alpha_index_i[m]]*tx[k];
                    double *w_i = &w[(w_size-1)*nr_class];
                    for(m=0; m<active_size_i[i]; m++)
                        G[m] += w_i[alpha_index_i[m]];
                    for (k=0; k<dim; k++)
                        double *w0_i = &w0[k*nr_class];
                        for(m=0; m<active_size_i[i]; m++)
                            G[m] += w0_i[alpha_index_i[m]];
                for(m=0;m<active_size_i[i];m++)
                    if(alpha_i[alpha_index_i[m]] < 0 && G[m] < minG)
                if(y_index[i] < active_size_i[i])
                    if(alpha_i[int32_t(prob->y[i])] < C[int32_t(GETI(i))] && G[y_index[i]] < minG)
                        minG = G[y_index[i]];
                for(m=0;m<active_size_i[i];m++)
                    if(be_shrunk(i, m, y_index[i], alpha_i[alpha_index_i[m]], minG))
                        while(active_size_i[i]>m)
                            if(!be_shrunk(i, active_size_i[i], y_index[i],
                                          alpha_i[alpha_index_i[active_size_i[i]]], minG))
                                CMath::swap(alpha_index_i[m], alpha_index_i[active_size_i[i]]);
                                if(y_index[i] == active_size_i[i])
                                else if(y_index[i] == m)
                                    y_index[i] = active_size_i[i];
 
                if(active_size_i[i] <= 1)
                if(maxG-minG <= 1e-12)
                for(m=0;m<active_size_i[i];m++)
                    B[m] = G[m] - Ai*alpha_i[alpha_index_i[m]];
                solve_sub_problem(Ai, y_index[i], C[int32_t(GETI(i))], active_size_i[i], alpha_new);
                for(m=0;m<active_size_i[i];m++)
                    double d = alpha_new[m] - alpha_i[alpha_index_i[m]];
                    alpha_i[alpha_index_i[m]] = alpha_new[m];
                        d_ind[nz_d] = alpha_index_i[m];
                memset(tx,0,dim*sizeof(double));
                prob->x->add_to_dense_vec(1.0,i,tx,dim);
                for (k=0; k<dim; k++)
                    double* w_i = &w[k*nr_class];
                    for (m=0; m<nz_d; m++)
                        w_i[d_ind[m]] += d_val[m]*tx[k];
                    double *w_i = &w[(w_size-1)*nr_class];
                        w_i[d_ind[m]] += d_val[m];
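// After the sub-problem is solved, only the classes whose dual variables
// actually changed (their indices and deltas gathered in d_ind/d_val, nz_d
// of them) are touched when updating the model: w_m += d_val[m]*x_i, using
// the dense copy of x_i in tx; the last row of w, &w[(w_size-1)*nr_class],
// carries the bias term when one is used.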
 
        if(stopping < eps_shrink)
            if(stopping < eps && start_from_all == true)
                    active_size_i[i] = nr_class;
                start_from_all = true;
            start_from_all = false;
        if (max_train_time!=0.0 && max_train_time < start_time.cur_time_diff())

    SG_SINFO("\noptimization finished, #iter = %d\n",iter)
    if (iter >= max_iter)
        SG_SINFO("Warning: reaching max number of iterations\n")
 
void destroy_model(struct liblinear_model *model_)
    SG_FREE(model_->label);

void destroy_param(liblinear_parameter* param)
    SG_FREE(param->weight_label);
    SG_FREE(param->weight);
 
#endif // DOXYGEN_SHOULD_SKIP_THIS