mkl_block_norm(1), beta_local(NULL), mkl_iterations(0), mkl_epsilon(1e-5),
interleaved_optimization(true), w_gap(1.0), rho(0)
SG_DEBUG("creating MKL object %p\n", this)
SG_DEBUG("deleting MKL object %p\n", this)
SG_INFO("trying to initialize CPLEX\n")
env = CPXopenCPLEX(&status);
SG_WARNING("Could not open CPLEX environment.\n")
CPXgeterrorstring(env, status, errmsg);
status = CPXsetintparam(env, CPX_PARAM_LPMETHOD, CPX_ALG_DUAL);
SG_ERROR("Failure to select dual lp optimization, error %d.\n", status)
status = CPXsetintparam(env, CPX_PARAM_DATACHECK, CPX_ON);
SG_ERROR("Failure to turn on data checking, error %d.\n", status)
SG_WARNING("CPXfreeprob failed, error code %d.\n", status)
int32_t status = CPXcloseCPLEX(&env);
SG_WARNING("Could not close CPLEX environment.\n")
CPXgeterrorstring(env, status, errmsg);
glp_set_obj_dir(lp_glpk, GLP_MIN);
glp_term_out(GLP_OFF);
int status = glp_get_status(lp);
if (status==GLP_INFEAS)
    SG_PRINT("solution is infeasible!\n")
else if (status==GLP_NOFEAS)
    SG_PRINT("problem has no feasible solution!\n")
SG_ERROR("%s::train_machine(): Number of training vectors (%d) does"
    " not match number of labels (%d)\n", get_name(),
SG_ERROR("No constraint generator (SVM) set\n")
int32_t num_weights = -1;
SG_INFO("num_kernels = %d\n", num_kernels)
ASSERT(num_weights==num_kernels)
SG_ERROR("Interleaved MKL optimization is currently "
    "only supported with SVMlight\n");
#ifdef USE_REFERENCE_COUNTING
int32_t refs=this->ref();
#ifdef USE_REFERENCE_COUNTING
SG_SWARNING("MKL algorithm terminates prematurely: current training time exceeds get_max_train_time()=%f. It may not have converged yet!\n",
    get_max_train_time())
for (int32_t i=0; i<nsv; i++)
SG_ERROR("Norm must be >= 1, e.g., the 1-norm gives standard sparse MKL; norms > 1 give non-sparse MKL\n")
if (lambda>1 || lambda<0)
else if (lambda==1.0)
SG_SWARNING("MKL algorithm terminates prematurely: current training time exceeds get_max_train_time()=%f. It may not have converged yet!\n",
    get_max_train_time())
ASSERT(nweights==num_kernels)
#if defined(USE_CPLEX) || defined(USE_GLPK)
int32_t inner_iters=0;
for (int32_t i=0; i<num_kernels; i++)
    mkl_objective+=old_beta[i]*sumw[i];
SG_ERROR("Solver type not supported (not compiled in?)\n")
int32_t nofKernelsGood;
nofKernelsGood = num_kernels;
for (p=0; p<num_kernels; ++p)
    if (sumw[p] >= 0.0 && old_beta[p] >= 0.0)
        beta[p] = CMath::sqrt(sumw[p]*old_beta[p]*old_beta[p]);
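/* Since old_beta[p] >= 0 this is the closed-form unnormalized update
 * beta_p = old_beta_p * sqrt(sumw_p); the normalization by Z happens in
 * code elided from this fragment. */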
for (p=0; p<num_kernels; ++p)
SG_PRINT("MKL-direct: p = %.3f\n", 1.0)
SG_PRINT("MKL-direct: nofKernelsGood = %d\n", nofKernelsGood)
SG_PRINT("MKL-direct: Z = %e\n", Z)
SG_PRINT("MKL-direct: eps = %e\n", epsRegul)
for (p=0; p<num_kernels; ++p)
SG_PRINT("MKL-direct: preR = %e\n", preR)
SG_PRINT("MKL-direct: preR/p = %e\n", preR)
SG_PRINT("MKL-direct: R = %e\n", R)
SG_ERROR("Assertion R >= 0 failed!\n")
for (p=0; p<num_kernels; ++p)
for (p=0; p<num_kernels; ++p)
for (p=0; p<num_kernels; ++p)
for (p=0; p<num_kernels; ++p)
    obj += sumw[p] * beta[p];
std::list<int32_t> I;
for (int32_t i=0; i<len; i++)
for (std::list<int32_t>::iterator it=I.begin(); it!=I.end(); it++)
for (int32_t i=0; i<n; i++)
for (int32_t j=0; j<n; j++)
}
while (ff>ff_old+1e-4*gg_old*(del-del_old));
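/* The do-while above backtracks on del: it keeps iterating while the
 * elastic-net dual value ff fails an Armijo-style decrease test built
 * from the previous value ff_old and derivative gg_old. */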
SG_ERROR("cannot compute objective, labels or kernel not set\n")
for (p=0; p<num_kernels; ++p)
for (p=0; p<num_kernels; ++p)
for (p=0; p<num_kernels; ++p)
    obj += sumw[p] * beta[p];
int32_t nofKernelsGood;
nofKernelsGood = num_kernels;
for (p=0; p<num_kernels; ++p)
    if (sumw[p] >= 0.0 && old_beta[p] >= 0.0)
        beta[p] = sumw[p] * old_beta[p]*old_beta[p] / mkl_norm;
for (p=0; p<num_kernels; ++p)
for (p=0; p<num_kernels; ++p)
for (p=0; p<num_kernels; ++p)
    preR += CMath::sq(old_beta[p] - beta[p]);
SG_PRINT("MKL-direct: nofKernelsGood = %d\n", nofKernelsGood)
SG_PRINT("MKL-direct: Z = %e\n", Z)
SG_PRINT("MKL-direct: eps = %e\n", epsRegul)
for (p=0; p<num_kernels; ++p)
SG_PRINT("MKL-direct: t[%3d] = %e ( diff = %e = %e - %e )\n", p, t, old_beta[p]-beta[p], old_beta[p], beta[p])
SG_PRINT("MKL-direct: preR = %e\n", preR)
SG_PRINT("MKL-direct: R = %e\n", R)
SG_ERROR("Assertion R >= 0 failed!\n")
for (p=0; p<num_kernels; ++p)
for (p=0; p<num_kernels; ++p)
for (p=0; p<num_kernels; ++p)
    obj += sumw[p] * beta[p];
const float64_t* old_beta, int32_t num_kernels,
SG_ERROR("MKL via NEWTON works only for norms>1\n")
const double epsBeta = 1e-32;
const double epsGamma = 1e-12;
const double epsWsq = 1e-12;
const double epsNewt = 0.0001;
const double epsStep = 1e-9;
const int nofNewtonSteps = 3;
const double hessRidge = 1e-6;
const int inLogSpace = 0;
for (p=0; p<num_kernels; ++p)
    beta[p] = old_beta[p];
    if (!(beta[p] >= epsBeta))
ASSERT(0.0 <= beta[p] && beta[p] <= 1.0)
if (!(fabs(Z-1.0) <= epsGamma))
    SG_WARNING("old_beta not normalized (diff=%e); forcing normalization. ", Z-1.0)
    for (p=0; p<num_kernels; ++p)
ASSERT(0.0 <= beta[p] && beta[p] <= 1.0)
for (p=0; p<num_kernels; ++p)
    if (!(sumw[p] >= 0))
        if (!(sumw[p] >= -epsWsq))
            SG_WARNING("sumw[%d] = %e; treated as 0. ", p, sumw[p])
if (!(gamma > epsGamma))
    SG_WARNING("bad gamma: %e; set to %e. ", gamma, epsGamma)
ASSERT(gamma >= epsGamma)
for (p=0; p<num_kernels; ++p)
    obj += beta[p] * sumw[p];
if (!(obj >= 0.0))
for (i = 0; i < nofNewtonSteps; ++i)
for (p=0; p<num_kernels; ++p)
    ASSERT(0.0 <= beta[p] && beta[p] <= 1.0)
const float64_t halfw2p = (sumw[p] >= 0.0) ? (sumw[p]*old_beta[p]*old_beta[p]) : 0.0;
const float64_t t1 = (t0 < 0) ? 0.0 : t0;
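/* Two alternative Newton directions follow; the guarding if (inLogSpace)
 * / else is elided in this fragment. The first (ridged) form is the
 * log-space variant, the second the plain one. */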
newtDir[p] = t1 / (t1 + t2*beta[p] + hessRidge);
newtDir[p] = (t1 == 0.0) ? 0.0 : (t1 / t2);
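// x == x is false only for NaN, so this asserts the direction is not NaN: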
ASSERT(newtDir[p] == newtDir[p])
while (stepSize >= epsStep)
for (p=0; p<num_kernels; ++p)
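/* Candidate update: multiplicative (log-space) vs additive; again the
 * if (inLogSpace) / else is elided here. */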
newtBeta[p] = beta[p] * CMath::exp(+stepSize * newtDir[p]);
newtBeta[p] = beta[p] + stepSize * newtDir[p];
if (!(newtBeta[p] >= epsBeta))
    newtBeta[p] = epsBeta;
for (p=0; p<num_kernels; ++p)
    if (newtBeta[p] > 1.0)
ASSERT(0.0 <= newtBeta[p] && newtBeta[p] <= 1.0)
for (p=0; p<num_kernels; ++p)
    newtObj += sumw[p] * old_beta[p]*old_beta[p] / newtBeta[p];
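/* Accept the step only on a sufficient decrease of the objective
 * (Armijo-style test, scaled by the current objective value): */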
if (newtObj < obj - epsNewt*stepSize*obj)
    for (p=0; p<num_kernels; ++p)
        beta[p] = newtBeta[p];
if (stepSize < epsStep)
for (p=0; p<num_kernels; ++p)
    obj += beta[p] * sumw[p];
int32_t NUMCOLS = 2*num_kernels + 1;
double* x=SG_MALLOC(double, NUMCOLS);
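/* LP variable layout (inferred from the bounds and objective set below):
 * columns 0..num_kernels-1 hold beta, columns num_kernels..2*num_kernels-1
 * are auxiliary variables, and the last, free column is the epigraph
 * variable; rho is recovered later as -x[2*num_kernels]. */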
for (int32_t i=0; i<2*num_kernels; i++)
for (int32_t i=num_kernels; i<2*num_kernels; i++)
obj[2*num_kernels]=1;
lb[2*num_kernels]=-CPX_INFBOUND;
ub[2*num_kernels]=CPX_INFBOUND;
int status = CPXnewcols(env, lp_cplex, NUMCOLS, obj, lb, ub, NULL, NULL);
CPXgeterrorstring(env, status, errmsg);
SG_INFO("adding the first row\n")
int initial_rmatbeg[1];
int initial_rmatind[num_kernels+1];
double initial_rmatval[num_kernels+1];
double initial_rhs[1];
char initial_sense[1];
initial_rmatbeg[0] = 0;
initial_sense[0]='E';
for (int32_t i=0; i<num_kernels; i++)
{
    initial_rmatind[i]=i;
    initial_rmatval[i]=1;
}
initial_rmatind[num_kernels]=2*num_kernels;
initial_rmatval[num_kernels]=0;
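/* With sense 'E' and unit coefficients on the beta columns this row pins
 * the simplex constraint sum_p beta_p = 1 (the rhs value is set in a line
 * elided from this fragment); the epigraph column enters with
 * coefficient 0. */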
status = CPXaddrows(env, lp_cplex, 0, 1, num_kernels+1,
    initial_rhs, initial_sense, initial_rmatbeg,
    initial_rmatind, initial_rmatval, NULL, NULL);
initial_rmatbeg[0] = 0;
initial_sense[0]='L';
initial_rmatind[0]=2*num_kernels;
initial_rmatval[0]=0;
    initial_rhs, initial_sense, initial_rmatbeg,
    initial_rmatind, initial_rmatval, NULL, NULL);
for (int32_t i=0; i<num_kernels; i++)
{
    initial_rmatind[i]=i;
    initial_rmatval[i]=1;
}
initial_rmatind[num_kernels]=2*num_kernels;
initial_rmatval[num_kernels]=0;
status = CPXaddqconstr(env, lp_cplex, 0, num_kernels+1, 1.0, 'L', NULL, NULL,
    initial_rmatind, initial_rmatind, initial_rmatval, NULL);
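/* In CPXaddqconstr's (quadrow, quadcol, quadval) form this adds the
 * quadratic constraint sum_p beta_p^2 <= 1; the epigraph column appears
 * with coefficient 0, so it does not enter the quadratic. */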
SG_ERROR("Failed to add the first row.\n")
for (int32_t q=0; q<num_kernels-1; q++)
rmatind[2]=num_kernels+q;
    rhs, sense, rmatbeg,
    rmatind, rmatval, NULL, NULL);
SG_ERROR("Failed to add a smoothness row (1).\n")
rmatind[2]=num_kernels+q;
    rhs, sense, rmatbeg,
    rmatind, rmatval, NULL, NULL);
SG_ERROR("Failed to add a smoothness row (2).\n")
int rmatind[num_kernels+1];
double rmatval[num_kernels+1];
for (int32_t i=0; i<num_kernels; i++)
    rmatval[i]=-(sumw[i]-suma);
    rmatval[i]=-sumw[i];
rmatind[num_kernels]=2*num_kernels;
rmatval[num_kernels]=-1;
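/* Coefficients of the new cutting plane generated from the current SVM
 * solution; the two rmatval[i] assignments are alternative branches (the
 * guarding if/else is elided), and the -1 on the epigraph column brings
 * theta into the row. */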
int32_t status = CPXaddrows(env, lp_cplex, 0, 1, num_kernels+1,
    rhs, sense, rmatbeg,
    rmatind, rmatval, NULL, NULL);
SG_ERROR("Failed to add the new row.\n")
for (int32_t i=0; i<num_kernels; i++)
    beta[i]=old_beta[i];
for (int32_t i=num_kernels; i<2*num_kernels+1; i++)
CMath::scale_vector(1/CMath::qnorm(beta, num_kernels, mkl_norm), beta, num_kernels);
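// rescale beta to unit mkl_norm-norm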
SG_ERROR("Failed to optimize Problem.\n")
status=CPXsolution(env, lp_cplex, &solstat, &objval,
    (double*) beta, NULL, NULL, NULL);
CMath::display_vector(beta, num_kernels, "beta");
SG_ERROR("Failed to obtain solution.\n")
CMath::scale_vector(1/CMath::qnorm(beta, num_kernels, mkl_norm), beta, num_kernels);
SG_ERROR("Failed to optimize Problem.\n")
int32_t cur_numrows=(int32_t) CPXgetnumrows(env, lp_cplex);
int32_t cur_numcols=(int32_t) CPXgetnumcols(env, lp_cplex);
int32_t num_rows=cur_numrows;
ASSERT(cur_numcols<=2*num_kernels+1)
status=CPXsolution(env, lp_cplex, &solstat, &objval,
    (double*) x, (double*) pi, (double*) slack, NULL);
status=CPXsolution(env, lp_cplex, &solstat, &objval,
    (double*) x, NULL, (double*) slack, NULL);
int32_t solution_ok = (!status);
SG_ERROR("Failed to obtain solution.\n")
int32_t num_active_rows=0;
int32_t max_idx = -1;
int32_t start_row = 1;
start_row+=2*(num_kernels-1);
for (int32_t i = start_row; i < cur_numrows; i++)
if (slack[i]>max_slack)
    max_slack=slack[i];
if (slack[i]>max_slack)
    max_slack=slack[i];
if ((num_rows-start_row>CMath::max(100,2*num_active_rows)) && (max_idx!=-1))
status = CPXdelrows(env, lp_cplex, max_idx, max_idx);
SG_ERROR("Failed to remove an old row.\n")
rho = -x[2*num_kernels];
for (int32_t i=0; i<num_kernels; i++)
SG_ERROR("Cplex not enabled at compile time\n")
SG_ERROR("MKL via GLPK works only for norm=1\n")
int32_t NUMCOLS = 2*num_kernels + 1;
glp_add_cols(lp_glpk, NUMCOLS);
for (int i=1; i<=2*num_kernels; i++)
{
    glp_set_obj_coef(lp_glpk, i, 0);
    glp_set_col_bnds(lp_glpk, i, GLP_DB, 0, 1);
}
for (int i=num_kernels+1; i<=2*num_kernels; i++)
glp_set_obj_coef(lp_glpk, NUMCOLS, 1);
glp_set_col_bnds(lp_glpk, NUMCOLS, GLP_FR, 0, 0);
int row_index = glp_add_rows(lp_glpk, 1);
int* ind = SG_MALLOC(int, num_kernels+2);
for (int i=1; i<=num_kernels; i++)
ind[num_kernels+1] = NUMCOLS;
val[num_kernels+1] = 0;
glp_set_mat_row(lp_glpk, row_index, num_kernels, ind, val);
glp_set_row_bnds(lp_glpk, row_index, GLP_FX, 1, 1);
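/* Fixing this row to [1,1] pins the simplex constraint sum_p beta_p = 1;
 * the val entries for the beta columns, set in a line elided here, are
 * presumably 1. */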
for (int32_t q=1; q<num_kernels; q++)
int mat_row_index = glp_add_rows(lp_glpk, 2);
mat_ind[3] = num_kernels+q;
glp_set_mat_row(lp_glpk, mat_row_index, 3, mat_ind, mat_val);
glp_set_row_bnds(lp_glpk, mat_row_index, GLP_UP, 0, 0);
glp_set_mat_row(lp_glpk, mat_row_index+1, 3, mat_ind, mat_val);
glp_set_row_bnds(lp_glpk, mat_row_index+1, GLP_UP, 0, 0);
int* ind=SG_MALLOC(int, num_kernels+2);
int row_index = glp_add_rows(lp_glpk, 1);
for (int32_t i=1; i<=num_kernels; i++)
    val[i] = -(sumw[i-1]-suma);
ind[num_kernels+1] = 2*num_kernels+1;
val[num_kernels+1] = -1;
glp_set_mat_row(lp_glpk, row_index, num_kernels+1, ind, val);
glp_set_row_bnds(lp_glpk, row_index, GLP_UP, 0, 0);
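/* New cutting plane, mirroring the CPLEX path above:
 * -sum_i (sumw[i]-suma)*beta_i - theta <= 0. */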
SG_ERROR("Failed to optimize Problem.\n")
int32_t cur_numrows = glp_get_num_rows(lp_glpk);
int32_t cur_numcols = glp_get_num_cols(lp_glpk);
int32_t num_rows=cur_numrows;
ASSERT(cur_numcols<=2*num_kernels+1)
for (int i=0; i<cur_numrows; i++)
{
    row_primal[i] = glp_get_row_prim(lp_glpk, i+1);
    row_dual[i] = glp_get_row_dual(lp_glpk, i+1);
}
for (int i=0; i<cur_numcols; i++)
    col_primal[i] = glp_get_col_prim(lp_glpk, i+1);
obj = -col_primal[2*num_kernels];
for (int i=0; i<num_kernels; i++)
    beta[i] = col_primal[i];
int32_t num_active_rows=0;
int32_t max_idx = -1;
int32_t start_row = 1;
start_row += 2*(num_kernels-1);
for (int32_t i=start_row; i<cur_numrows; i++)
if (row_primal[i]<max_slack)
    max_slack = row_primal[i];
if ((num_rows-start_row>CMath::max(100, 2*num_active_rows)) && max_idx!=-1)
del_rows[1] = max_idx+1;
glp_del_rows(lp_glpk, 1, del_rows);
SG_FREE(row_primal);
SG_FREE(col_primal);
SG_ERROR("Glpk not enabled at compile time\n")
ASSERT(nweights==num_kernels)
for (int32_t i=0; i<num_kernels; i++)
for (int32_t n=0; n<num_kernels; n++)
for (int32_t i=0; i<nsv; i++)
for (int32_t j=0; j<nsv; j++)
for (int32_t i=0; i<n; i++)
for (int32_t j=0; j<n; j++)
mkl_obj=-0.5*mkl_obj;
SG_ERROR("cannot compute objective, labels or kernel not set\n")
int* ind=SG_MALLOC(int, num_kernels+1);
double const_term = 1-CMath::qsq(beta, num_kernels, mkl_norm);
ASSERT(CMath::fequal(const_term, 0.0))
for (int32_t i=0; i<num_kernels; i++)
    lin_term[i]=grad_beta[i] - 2*beta[i]*hess_beta[i];
    const_term+=grad_beta[i]*beta[i] - CMath::sq(beta[i])*hess_beta[i];
ind[num_kernels]=2*num_kernels;
hess_beta[num_kernels]=0;
lin_term[num_kernels]=0;
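/* Second-order Taylor expansion of the q-norm constraint
 * ||beta||_q^q <= 1 around the current beta: hess_beta holds the
 * quadratic coefficients, lin_term = grad - 2*beta*hess the linear part,
 * and const_term absorbs the constant remainder, so the constraint below
 * reads lin_term*x + x'*diag(hess_beta)*x <= const_term. */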
status = CPXaddqconstr(env, lp_cplex, num_kernels+1, num_kernels+1, const_term, 'L', ind, lin_term,
    ind, ind, hess_beta, NULL);