#include <ilcplex/cplex.h>
SG_INFO("trying to initialize CPLEX\n")
env = CPXopenCPLEX (&status);
SG_WARNING("Could not open CPLEX environment.\n")
CPXgeterrorstring (env, status, errmsg);
status = CPXsetintparam (env, CPX_PARAM_LPMETHOD, CPX_ALG_DUAL);
SG_ERROR("Failure to select dual lp optimization, error %d.\n", status)
status = CPXsetintparam (env, CPX_PARAM_DATACHECK, CPX_ON);
SG_ERROR("Failure to turn on data checking, error %d.\n", status)
int* ind=SG_MALLOC(int, num_kernels+1);
double const_term = 1-CMath::qsq(beta, num_kernels, mkl_norm);
ASSERT(CMath::fequal(const_term, 0.0))
for (int32_t i=0; i<num_kernels; i++)
lin_term[i]=grad_beta[i] - 2*beta[i]*hess_beta[i];
const_term+=grad_beta[i]*beta[i] - CMath::sq(beta[i])*hess_beta[i];
ind[num_kernels]=2*num_kernels;
hess_beta[num_kernels]=0;
lin_term[num_kernels]=0;
status = CPXaddqconstr (env, lp_cplex, num_kernels+1, num_kernels+1, const_term,
    'L', ind, lin_term, ind, ind, hess_beta, NULL);
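// The quadratic constraint above is a second-order Taylor model of the
// q-norm ball ||beta||_q^2 <= 1 around the current, already normalized beta
// (hence the ASSERT that const_term == 0 before the gradient terms enter).
// With diagonal curvature h_p (hess_beta) and gradient g_p (grad_beta),
// expanding g_p*(b_p - beta_p) + h_p*(b_p - beta_p)^2 <= 1 - ||beta||_q^2
// and collecting terms in b gives
//   sum_p h_p*b_p^2 + sum_p (g_p - 2*beta_p*h_p)*b_p <= const_term,
// with const_term = 1 - ||beta||_q^2 + sum_p (g_p*beta_p - h_p*beta_p^2):
// exactly the hess_beta / lin_term / const_term layout handed to
// CPXaddqconstr with sense 'L'.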
SG_WARNING("CPXfreeprob failed, error code %d.\n", status)
int32_t status = CPXcloseCPLEX (&env);
SG_WARNING("Could not close CPLEX environment.\n")
CPXgeterrorstring (env, status, errmsg);
glp_set_obj_dir(lp_glpk, GLP_MIN);
glp_term_out(GLP_OFF);
int status = glp_get_status(lp_glpk);
if (status==GLP_INFEAS)
else if (status==GLP_NOFEAS)
SG_SPRINT("problem has no feasible solution!\n")
mkl_block_norm(1), beta_local(NULL), mkl_iterations(0), mkl_epsilon(1e-5),
interleaved_optimization(true), w_gap(1.0), rho(0), self()
SG_DEBUG("creating MKL object %p\n", this)
SG_DEBUG("deleting MKL object %p\n", this)
SG_ERROR("%s::train_machine(): Number of training vectors (%d) does"
    " not match number of labels (%d)\n", get_name(),
    data->get_num_vectors(), m_labels->get_num_labels())
SG_ERROR("No constraint generator (SVM) set\n")
int32_t num_weights = -1;
SG_INFO("num_kernels = %d\n", num_kernels)
ASSERT(num_weights==num_kernels)
self->cleanup_cplex();
SG_ERROR("Interleaved MKL optimization is currently "
    "only supported with SVMlight\n");
#ifdef USE_REFERENCE_COUNTING
int32_t refs=this->ref();
#ifdef USE_REFERENCE_COUNTING
SG_SWARNING("MKL algorithm terminates PREMATURELY: current training time exceeds get_max_train_time()=%f. It may not have converged yet!\n",
    get_max_train_time())
for (int32_t i=0; i<nsv; i++)
SG_ERROR("Norm must be >= 1, e.g., 1-norm is the standard MKL; norms>1 nonsparse MKL\n")
if (lambda>1 || lambda<0)
else if (lambda==1.0)
SG_SWARNING("MKL algorithm terminates PREMATURELY: current training time exceeds get_max_train_time()=%f. It may not have converged yet!\n",
    get_max_train_time())
ASSERT(nweights==num_kernels)
#if defined(USE_CPLEX) || defined(USE_GLPK)
int32_t inner_iters=0;
for (int32_t i=0; i<num_kernels; i++)
mkl_objective+=old_beta[i]*sumw[i];
SG_ERROR("Solver type not supported (not compiled in?)\n")
int32_t nofKernelsGood;
nofKernelsGood = num_kernels;
for (p=0; p<num_kernels; ++p )
if (sumw[p] >= 0.0 && old_beta[p] >= 0.0 )
beta[p] = CMath::sqrt(sumw[p]*old_beta[p]*old_beta[p]);
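// Since old_beta[p] >= 0 here, CMath::sqrt(sumw[p]*old_beta[p]*old_beta[p])
// equals old_beta[p]*sqrt(sumw[p]): the analytic interim update
// beta_p <- beta_p * sqrt(w_p), which the Z bookkeeping below suggests is
// then rescaled back onto the feasible set.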
for( p=0; p<num_kernels; ++p )
SG_PRINT("MKL-direct: p = %.3f\n", 1.0 )
SG_PRINT("MKL-direct: nofKernelsGood = %d\n", nofKernelsGood )
SG_PRINT("MKL-direct: Z = %e\n", Z )
SG_PRINT("MKL-direct: eps = %e\n", epsRegul )
for( p=0; p<num_kernels; ++p )
SG_PRINT("MKL-direct: preR = %e\n", preR )
SG_PRINT("MKL-direct: preR/p = %e\n", preR )
SG_PRINT("MKL-direct: R = %e\n", R )
SG_ERROR("Assertion R >= 0 failed!\n" )
for( p=0; p<num_kernels; ++p )
for( p=0; p<num_kernels; ++p )
for( p=0; p<num_kernels; ++p )
for (p=0; p<num_kernels; ++p )
obj += sumw[p] * beta[p];
std::list<int32_t> I;
for (int32_t i=0; i<len; i++)
for (std::list<int32_t>::iterator it=I.begin(); it!=I.end(); it++)
for (int32_t i=0; i<n; i++)
for (int32_t j=0; j<n; j++)
} while (ff>ff_old+1e-4*gg_old*(del-del_old));
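// The do/while guard above is an Armijo-style sufficient-decrease test for
// the elastic-net line search: keep iterating while the new dual value ff
// has not improved on ff_old by at least 1e-4 of the first-order prediction
// gg_old*(del-del_old).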
SG_ERROR("cannot compute objective, labels or kernel not set\n")
for( p=0; p<num_kernels; ++p )
for( p=0; p<num_kernels; ++p )
for( p=0; p<num_kernels; ++p )
obj += sumw[p] * beta[p];
int32_t nofKernelsGood;
nofKernelsGood = num_kernels;
for( p=0; p<num_kernels; ++p )
if( sumw[p] >= 0.0 && old_beta[p] >= 0.0 )
beta[p] = sumw[p] * old_beta[p]*old_beta[p] / mkl_norm;
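// Interim analytic update for p-norm MKL: beta_p <- w_p*beta_p^2 / p with
// p = mkl_norm. In the full source this is presumably followed by taking
// the 1/(mkl_norm+1) power and renormalizing beta, matching the Z and preR
// bookkeeping below.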
for( p=0; p<num_kernels; ++p )
for( p=0; p<num_kernels; ++p )
for( p=0; p<num_kernels; ++p )
preR += CMath::sq( old_beta[p] - beta[p]);
SG_PRINT("MKL-direct: nofKernelsGood = %d\n", nofKernelsGood )
SG_PRINT("MKL-direct: Z = %e\n", Z )
SG_PRINT("MKL-direct: eps = %e\n", epsRegul )
for( p=0; p<num_kernels; ++p )
SG_PRINT("MKL-direct: t[%3d] = %e ( diff = %e = %e - %e )\n", p, t, old_beta[p]-beta[p], old_beta[p], beta[p] )
SG_PRINT("MKL-direct: preR = %e\n", preR )
SG_PRINT("MKL-direct: R = %e\n", R )
SG_ERROR("Assertion R >= 0 failed!\n" )
for( p=0; p<num_kernels; ++p )
for( p=0; p<num_kernels; ++p )
for( p=0; p<num_kernels; ++p )
obj += sumw[p] * beta[p];
const float64_t* old_beta, int32_t num_kernels,
SG_ERROR("MKL via NEWTON works only for norms>1\n")
const double epsBeta = 1e-32;
const double epsGamma = 1e-12;
const double epsWsq = 1e-12;
const double epsNewt = 0.0001;
const double epsStep = 1e-9;
const int nofNewtonSteps = 3;
const double hessRidge = 1e-6;
const int inLogSpace = 0;
for( p=0; p<num_kernels; ++p )
beta[p] = old_beta[p];
if( !( beta[p] >= epsBeta ) )
ASSERT( 0.0 <= beta[p] && beta[p] <= 1.0 )
if( !( fabs(Z-1.0) <= epsGamma ) )
SG_WARNING("old_beta not normalized (diff=%e); forcing normalization. ", Z-1.0 )
for( p=0; p<num_kernels; ++p )
ASSERT( 0.0 <= beta[p] && beta[p] <= 1.0 )
for ( p=0; p<num_kernels; ++p )
if ( !( sumw[p] >= 0 ) )
if( !( sumw[p] >= -epsWsq ) )
SG_WARNING("sumw[%d] = %e; treated as 0. ", p, sumw[p] )
if( !( gamma > epsGamma ) )
SG_WARNING("bad gamma: %e; set to %e. ", gamma, epsGamma )
ASSERT( gamma >= epsGamma )
for( p=0; p<num_kernels; ++p )
obj += beta[p] * sumw[p];
if( !( obj >= 0.0 ) )
for (i = 0; i < nofNewtonSteps; ++i )
for( p=0; p<num_kernels; ++p )
ASSERT( 0.0 <= beta[p] && beta[p] <= 1.0 )
const float64_t halfw2p = ( sumw[p] >= 0.0 ) ? (sumw[p]*old_beta[p]*old_beta[p]) : 0.0;
const float64_t t1 = ( t0 < 0 ) ? 0.0 : t0;
newtDir[p] = t1 / ( t1 + t2*beta[p] + hessRidge );
newtDir[p] = ( t1 == 0.0 ) ? 0.0 : ( t1 / t2 );
ASSERT( newtDir[p] == newtDir[p] )
while( stepSize >= epsStep )
for( p=0; p<num_kernels; ++p )
newtBeta[p] = beta[p] * CMath::exp( + stepSize * newtDir[p] );
newtBeta[p] = beta[p] + stepSize * newtDir[p];
if( !( newtBeta[p] >= epsBeta ) )
newtBeta[p] = epsBeta;
for( p=0; p<num_kernels; ++p )
if( newtBeta[p] > 1.0 )
ASSERT( 0.0 <= newtBeta[p] && newtBeta[p] <= 1.0 )
for( p=0; p<num_kernels; ++p )
newtObj += sumw[p] * old_beta[p]*old_beta[p] / newtBeta[p];
if ( newtObj < obj - epsNewt*stepSize*obj )
for( p=0; p<num_kernels; ++p )
beta[p] = newtBeta[p];
if( stepSize < epsStep )
for( p=0; p<num_kernels; ++p )
obj += beta[p] * sumw[p];
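// --- Illustrative aside (not part of MKL.cpp) ----------------------------
// The Newton fragments above follow a damped-Newton pattern: compute a
// Newton direction, then halve the step until the objective drops by at
// least epsNewt*stepSize times its current value, giving up below epsStep.
// A self-contained toy version of that control flow (the 1-D objective is
// chosen only for illustration):
#include <cmath>
#include <cstdio>

int main()
{
    const double epsNewt = 1e-4, epsStep = 1e-9;
    const int nofNewtonSteps = 3;
    auto f = [](double x) { return x*x*x*x - 2.0*x + 1.0; };
    auto g = [](double x) { return 4.0*x*x*x - 2.0; };  // f'
    auto h = [](double x) { return 12.0*x*x; };         // f''
    double x = 3.0;
    for (int i = 0; i < nofNewtonSteps; ++i)
    {
        const double dir = -g(x) / h(x); // Newton direction
        const double obj = f(x);
        double stepSize = 1.0;
        while (stepSize >= epsStep)
        {
            const double xNew = x + stepSize*dir;
            if (f(xNew) < obj - epsNewt*stepSize*std::fabs(obj))
            {
                x = xNew;    // sufficient decrease: accept
                break;
            }
            stepSize /= 2.0; // otherwise halve the step
        }
        if (stepSize < epsStep)
            break;           // no acceptable step left
    }
    std::printf("x = %g, f(x) = %g\n", x, f(x));
    return 0;
}
// --------------------------------------------------------------------------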
int32_t NUMCOLS = 2*num_kernels + 1;
double* x=SG_MALLOC(double, NUMCOLS);
double obj[NUMCOLS];
for (int32_t i=0; i<2*num_kernels; i++)
for (int32_t i=num_kernels; i<2*num_kernels; i++)
obj[2*num_kernels]=1 ;
lb[2*num_kernels]=-CPX_INFBOUND ;
ub[2*num_kernels]=CPX_INFBOUND ;
int status = CPXnewcols (self->env, self->lp_cplex, NUMCOLS, obj, lb, ub, NULL, NULL);
CPXgeterrorstring (self->env, status, errmsg);
SG_INFO("adding the first row\n")
int initial_rmatbeg[1];
int initial_rmatind[num_kernels+1];
double initial_rmatval[num_kernels+1];
double initial_rhs[1];
char initial_sense[1];
initial_rmatbeg[0] = 0;
initial_sense[0]='E' ;
for (int32_t i=0; i<num_kernels; i++)
initial_rmatind[i]=i ;
initial_rmatval[i]=1 ;
initial_rmatind[num_kernels]=2*num_kernels ;
initial_rmatval[num_kernels]=0 ;
status = CPXaddrows (self->env, self->lp_cplex, 0, 1, num_kernels+1,
    initial_rhs, initial_sense, initial_rmatbeg,
    initial_rmatind, initial_rmatval, NULL, NULL);
initial_rmatbeg[0] = 0;
initial_sense[0]='L' ;
initial_rmatind[0]=2*num_kernels ;
initial_rmatval[0]=0 ;
status = CPXaddrows (self->env, self->lp_cplex, 0, 1, 1,
    initial_rhs, initial_sense, initial_rmatbeg,
    initial_rmatind, initial_rmatval, NULL, NULL);
for (int32_t i=0; i<num_kernels; i++)
initial_rmatind[i]=i ;
initial_rmatval[i]=1 ;
initial_rmatind[num_kernels]=2*num_kernels ;
initial_rmatval[num_kernels]=0 ;
status = CPXaddqconstr (self->env, self->lp_cplex, 0, num_kernels+1, 1.0,
    'L', NULL, NULL,
    initial_rmatind, initial_rmatind, initial_rmatval, NULL);
SG_ERROR("Failed to add the first row.\n")
for (int32_t q=0; q<num_kernels-1; q++)
rmatind[2]=num_kernels+q ;
status = CPXaddrows (self->env, self->lp_cplex, 0, 1, 3,
    rhs, sense, rmatbeg,
    rmatind, rmatval, NULL, NULL);
SG_ERROR("Failed to add a smoothness row (1).\n")
rmatind[2]=num_kernels+q ;
status = CPXaddrows (self->env, self->lp_cplex, 0, 1, 3,
    rhs, sense, rmatbeg,
    rmatind, rmatval, NULL, NULL);
SG_ERROR("Failed to add a smoothness row (2).\n")
int rmatind[num_kernels+1];
double rmatval[num_kernels+1];
for (int32_t i=0; i<num_kernels; i++)
rmatval[i]=-(sumw[i]-suma) ;
rmatval[i]=-sumw[i];
rmatind[num_kernels]=2*num_kernels ;
rmatval[num_kernels]=-1 ;
int32_t status = CPXaddrows (self->env, self->lp_cplex, 0, 1, num_kernels+1,
    rhs, sense, rmatbeg,
    rmatind, rmatval, NULL, NULL);
SG_ERROR("Failed to add the new row.\n")
status = CPXlpopt (self->env, self->lp_cplex);
status = CPXbaropt(self->env, self->lp_cplex);
for (int32_t i=0; i<num_kernels; i++)
beta[i]=old_beta[i];
for (int32_t i=num_kernels; i<2*num_kernels+1; i++)
CMath::scale_vector(1/CMath::qnorm(beta, num_kernels, mkl_norm), beta, num_kernels);
set_qnorm_constraints(beta, num_kernels);
status = CPXbaropt(self->env, self->lp_cplex);
SG_ERROR("Failed to optimize Problem.\n")
status=CPXsolution(self->env, self->lp_cplex, &solstat, &objval,
    (double*) beta, NULL, NULL, NULL);
CMath::display_vector(beta, num_kernels, "beta");
SG_ERROR("Failed to obtain solution.\n")
CMath::scale_vector(1/CMath::qnorm(beta, num_kernels, mkl_norm), beta, num_kernels);
SG_ERROR("Failed to optimize Problem.\n")
int32_t cur_numrows=(int32_t) CPXgetnumrows(self->env, self->lp_cplex);
int32_t cur_numcols=(int32_t) CPXgetnumcols(self->env, self->lp_cplex);
int32_t num_rows=cur_numrows;
ASSERT(cur_numcols<=2*num_kernels+1)
status=CPXsolution(self->env, self->lp_cplex, &solstat, &objval,
    (double*) x, (double*) pi, (double*) slack, NULL);
status=CPXsolution(self->env, self->lp_cplex, &solstat, &objval,
    (double*) x, NULL, (double*) slack, NULL);
int32_t solution_ok = (!status) ;
SG_ERROR("Failed to obtain solution.\n")
int32_t num_active_rows=0 ;
int32_t max_idx = -1 ;
int32_t start_row = 1 ;
start_row+=2*(num_kernels-1);
for (int32_t i = start_row; i < cur_numrows; i++)
if (slack[i]>max_slack)
max_slack=slack[i] ;
if (slack[i]>max_slack)
max_slack=slack[i] ;
if ( (num_rows-start_row>CMath::max(100,2*num_active_rows)) && (max_idx!=-1))
status = CPXdelrows (self->env, self->lp_cplex, max_idx, max_idx) ;
SG_ERROR("Failed to remove an old row.\n")
rho = -x[2*num_kernels] ;
for (int32_t i=0; i<num_kernels; i++)
SG_ERROR("Cplex not enabled at compile time\n")
SG_ERROR("MKL via GLPK works only for norm=1\n")
int32_t NUMCOLS = 2*num_kernels + 1 ;
glp_add_cols(self->lp_glpk, NUMCOLS);
for (int i=1; i<=2*num_kernels; i++)
glp_set_obj_coef(self->lp_glpk, i, 0);
glp_set_col_bnds(self->lp_glpk, i, GLP_DB, 0, 1);
for (int i=num_kernels+1; i<=2*num_kernels; i++)
glp_set_obj_coef(self->lp_glpk, i, C_mkl);
glp_set_obj_coef(self->lp_glpk, NUMCOLS, 1);
glp_set_col_bnds(self->lp_glpk, NUMCOLS, GLP_FR, 0, 0);
int row_index = glp_add_rows(self->lp_glpk, 1);
int* ind = SG_MALLOC(int, num_kernels+2);
for (int i=1; i<=num_kernels; i++)
ind[num_kernels+1] = NUMCOLS;
val[num_kernels+1] = 0;
glp_set_mat_row(self->lp_glpk, row_index, num_kernels, ind, val);
glp_set_row_bnds(self->lp_glpk, row_index, GLP_FX, 1, 1);
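// The GLP_FX row above (lower bound == upper bound == 1) is the simplex
// constraint sum_p beta_p = 1 over the first num_kernels columns. Note
// GLPK's 1-based indexing for rows, columns and the ind/val arrays.
// --- Illustrative aside (not part of MKL.cpp) ----------------------------
// A minimal sketch of adding such a fixed row with the GLPK C API, assuming
// a problem that already has at least three columns:
#include <glpk.h>

static void add_simplex_row_sketch(glp_prob* lp)
{
    int row = glp_add_rows(lp, 1);
    int    ind[4] = {0, 1, 2, 3};     // element 0 is ignored by GLPK
    double val[4] = {0, 1.0, 1.0, 1.0};
    glp_set_mat_row(lp, row, 3, ind, val);        // beta_1+beta_2+beta_3
    glp_set_row_bnds(lp, row, GLP_FX, 1.0, 1.0);  // ... = 1
}
// --------------------------------------------------------------------------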
for (int32_t q=1; q<num_kernels; q++)
int mat_row_index = glp_add_rows(self->lp_glpk, 2);
mat_ind[3] = num_kernels+q;
glp_set_mat_row(self->lp_glpk, mat_row_index, 3, mat_ind, mat_val);
glp_set_row_bnds(self->lp_glpk, mat_row_index, GLP_UP, 0, 0);
glp_set_mat_row(self->lp_glpk, mat_row_index+1, 3, mat_ind, mat_val);
glp_set_row_bnds(self->lp_glpk, mat_row_index+1, GLP_UP, 0, 0);
int* ind=SG_MALLOC(int, num_kernels+2);
int row_index = glp_add_rows(self->lp_glpk, 1);
for (int32_t i=1; i<=num_kernels; i++)
val[i] = -(sumw[i-1]-suma);
ind[num_kernels+1] = 2*num_kernels+1;
val[num_kernels+1] = -1;
glp_set_mat_row(self->lp_glpk, row_index, num_kernels+1, ind, val);
glp_set_row_bnds(self->lp_glpk, row_index, GLP_UP, 0, 0);
glp_simplex(self->lp_glpk, self->lp_glpk_parm);
bool res = self->check_glp_status();
SG_ERROR("Failed to optimize Problem.\n")
int32_t cur_numrows = glp_get_num_rows(self->lp_glpk);
int32_t cur_numcols = glp_get_num_cols(self->lp_glpk);
int32_t num_rows=cur_numrows;
ASSERT(cur_numcols<=2*num_kernels+1)
for (int i=0; i<cur_numrows; i++)
row_primal[i] = glp_get_row_prim(self->lp_glpk, i+1);
row_dual[i] = glp_get_row_dual(self->lp_glpk, i+1);
for (int i=0; i<cur_numcols; i++)
col_primal[i] = glp_get_col_prim(self->lp_glpk, i+1);
obj = -col_primal[2*num_kernels];
for (int i=0; i<num_kernels; i++)
beta[i] = col_primal[i];
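// Reading the solution back: the first num_kernels primal column values are
// the new kernel weights beta, and the negated last column (the epigraph
// variable) is the current objective value; the CPLEX branch does the same
// with rho = -x[2*num_kernels].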
int32_t num_active_rows=0;
int32_t max_idx = -1;
int32_t start_row = 1;
start_row += 2*(num_kernels-1);
for (int32_t i=start_row; i<cur_numrows; i++)
if (row_primal[i]<max_slack)
max_slack = row_primal[i];
if ((num_rows-start_row>CMath::max(100, 2*num_active_rows)) && max_idx!=-1)
del_rows[1] = max_idx+1;
glp_del_rows(self->lp_glpk, 1, del_rows);
SG_FREE(row_primal);
SG_FREE(col_primal);
SG_ERROR("Glpk not enabled at compile time\n")
ASSERT(nweights==num_kernels)
for (int32_t i=0; i<num_kernels; i++)
for (int32_t n=0; n<num_kernels; n++)
for (int32_t i=0; i<nsv; i++)
for (int32_t j=0; j<nsv; j++)
for (int32_t i=0; i<n; i++)
for (int32_t j=0; j<n; j++)
mkl_obj=-0.5*mkl_obj;
SG_ERROR("cannot compute objective, labels or kernel not set\n")