00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012 #include "lib/config.h"
00013
00014 #ifdef USE_CPLEX
00015
00016 #include "lib/Mathematics.h"
00017 #include "lib/Signal.h"
00018 #include "lib/Time.h"
00019 #include "classifier/LinearClassifier.h"
00020 #include "classifier/SubGradientLPM.h"
00021 #include "classifier/svm/qpbsvmlib.h"
00022 #include "features/DotFeatures.h"
00023 #include "features/Labels.h"
00024
00025 using namespace shogun;
00026
00027 #define DEBUG_SUBGRADIENTLPM
00028
/** Default constructor.
 *
 * Initializes both regularization constants C1 and C2 to 1, sets the
 * convergence precision epsilon to 1e-5 and the working-set size to 42
 * (limited to at most 2000); the bias term is disabled.
 */
CSubGradientLPM::CSubGradientLPM()
: CLinearClassifier(), C1(1), C2(1), epsilon(1e-5), qpsize(42),
	qpsize_max(2000), use_bias(false), delta_active(0), delta_bound(0)
{
}
00034
/** Constructor setting the regularization constant and the training data.
 *
 * @param C regularization constant, used for both C1 and C2
 * @param traindat training features
 * @param trainlab training labels
 *
 * NOTE(review): features/labels are assigned directly to the base-class
 * members without apparent reference counting here — verify ownership is
 * handled by the caller or the base class.
 */
CSubGradientLPM::CSubGradientLPM(
	float64_t C, CDotFeatures* traindat, CLabels* trainlab)
: CLinearClassifier(), C1(C), C2(C), epsilon(1e-5), qpsize(42),
	qpsize_max(2000), use_bias(false), delta_active(0), delta_bound(0)
{
	CLinearClassifier::features=traindat;
	CClassifier::labels=trainlab;
}
00043
00044
/** Destructor: frees all per-training buffers and the CPLEX solver
 * via cleanup(). */
CSubGradientLPM::~CSubGradientLPM()
{
	cleanup();
}
00049
00050 int32_t CSubGradientLPM::find_active(
00051 int32_t num_feat, int32_t num_vec, int32_t& num_active, int32_t& num_bound)
00052 {
00053
00054
00055
00056
00057
00058
00059
00060
00061
00062
00063
00064
00065
00066
00067
00068
00069
00070
00071
00072
00073
00074
00075
00076
00077
00078
00079 delta_bound=0;
00080 delta_active=0;
00081 num_active=0;
00082 num_bound=0;
00083
00084 for (int32_t i=0; i<num_vec; i++)
00085 {
00086 active[i]=0;
00087
00088
00089 if (proj[i] < 1-autoselected_epsilon)
00090 {
00091 idx_active[num_active++]=i;
00092 active[i]=1;
00093 }
00094
00095
00096 if (CMath::abs(proj[i]-1) <= autoselected_epsilon)
00097 {
00098 idx_bound[num_bound++]=i;
00099 active[i]=2;
00100 }
00101
00102 if (active[i]!=old_active[i])
00103 delta_active++;
00104
00105 if (active[i]==2 && old_active[i]==2)
00106 delta_bound++;
00107 }
00108
00109
00110 if (delta_active==0 && work_epsilon<=epsilon)
00111 return 0;
00112 else if (delta_active==0)
00113 {
00114 work_epsilon=CMath::min(work_epsilon/2, autoselected_epsilon);
00115 work_epsilon=CMath::max(work_epsilon, epsilon);
00116 num_bound=qpsize;
00117 }
00118
00119 delta_bound=0;
00120 delta_active=0;
00121 num_active=0;
00122 num_bound=0;
00123
00124 for (int32_t i=0; i<num_vec; i++)
00125 {
00126 tmp_proj[i]=CMath::abs(proj[i]-1);
00127 tmp_proj_idx[i]=i;
00128 }
00129
00130 CMath::qsort_index(tmp_proj, tmp_proj_idx, num_vec);
00131
00132 autoselected_epsilon=tmp_proj[CMath::min(qpsize,num_vec)];
00133
00134 #ifdef DEBUG_SUBGRADIENTSVM
00135
00136 #endif
00137
00138 if (autoselected_epsilon>work_epsilon)
00139 autoselected_epsilon=work_epsilon;
00140
00141 if (autoselected_epsilon<epsilon)
00142 {
00143 autoselected_epsilon=epsilon;
00144
00145 int32_t i=0;
00146 while (i < num_vec && tmp_proj[i] <= autoselected_epsilon)
00147 i++;
00148
00149
00150
00151 if (i>=qpsize_max && autoselected_epsilon>epsilon)
00152 {
00153 SG_PRINT("qpsize limit (%d) reached\n", qpsize_max);
00154 int32_t num_in_qp=i;
00155 while (--i>=0 && num_in_qp>=qpsize_max)
00156 {
00157 if (tmp_proj[i] < autoselected_epsilon)
00158 {
00159 autoselected_epsilon=tmp_proj[i];
00160 num_in_qp--;
00161 }
00162 }
00163
00164
00165 }
00166 }
00167
00168 for (int32_t i=0; i<num_vec; i++)
00169 {
00170 active[i]=0;
00171
00172
00173 if (proj[i] < 1-autoselected_epsilon)
00174 {
00175 idx_active[num_active++]=i;
00176 active[i]=1;
00177 }
00178
00179
00180 if (CMath::abs(proj[i]-1) <= autoselected_epsilon)
00181 {
00182 idx_bound[num_bound++]=i;
00183 active[i]=2;
00184 }
00185
00186 if (active[i]!=old_active[i])
00187 delta_active++;
00188
00189 if (active[i]==2 && old_active[i]==2)
00190 delta_bound++;
00191 }
00192
00193 pos_idx=0;
00194 neg_idx=0;
00195 zero_idx=0;
00196
00197 for (int32_t i=0; i<num_feat; i++)
00198 {
00199 if (w[i]>work_epsilon)
00200 {
00201 w_pos[pos_idx++]=i;
00202 grad_w[i]=1;
00203 }
00204 else if (w[i]<-work_epsilon)
00205 {
00206 w_neg[neg_idx++]=i;
00207 grad_w[i]=-1;
00208 }
00209
00210 if (CMath::abs(w[i])<=work_epsilon)
00211 {
00212 w_zero[zero_idx++]=i;
00213 grad_w[i]=-1;
00214 }
00215 }
00216
00217 return delta_active;
00218 }
00219
00220
00221 void CSubGradientLPM::update_active(int32_t num_feat, int32_t num_vec)
00222 {
00223 for (int32_t i=0; i<num_vec; i++)
00224 {
00225 if (active[i]==1 && old_active[i]!=1)
00226 {
00227 features->add_to_dense_vec(C1*get_label(i), i, sum_CXy_active, num_feat);
00228 if (use_bias)
00229 sum_Cy_active+=C1*get_label(i);
00230 }
00231 else if (old_active[i]==1 && active[i]!=1)
00232 {
00233 features->add_to_dense_vec(-C1*get_label(i), i, sum_CXy_active, num_feat);
00234 if (use_bias)
00235 sum_Cy_active-=C1*get_label(i);
00236 }
00237 }
00238
00239 CMath::swap(active,old_active);
00240 }
00241
00242 float64_t CSubGradientLPM::line_search(int32_t num_feat, int32_t num_vec)
00243 {
00244 int32_t num_hinge=0;
00245 float64_t alpha=0;
00246 float64_t sgrad=0;
00247
00248 float64_t* A=new float64_t[num_feat+num_vec];
00249 float64_t* B=new float64_t[num_feat+num_vec];
00250 float64_t* C=new float64_t[num_feat+num_vec];
00251 float64_t* D=new float64_t[num_feat+num_vec];
00252
00253 for (int32_t i=0; i<num_feat+num_vec; i++)
00254 {
00255 if (i<num_feat)
00256 {
00257 A[i]=-grad_w[i];
00258 B[i]=w[i];
00259 C[i]=+grad_w[i];
00260 D[i]=-w[i];
00261 }
00262 else
00263 {
00264 float64_t p=get_label(i-num_feat)*(features->dense_dot(i-num_feat, grad_w, num_feat)+grad_b);
00265 grad_proj[i-num_feat]=p;
00266
00267 A[i]=0;
00268 B[i]=0;
00269 C[i]=C1*p;
00270 D[i]=C1*(1-proj[i-num_feat]);
00271 }
00272
00273 if (A[i]==C[i] && B[i]>D[i])
00274 sgrad+=A[i]+C[i];
00275 else if (A[i]==C[i] && B[i]==D[i])
00276 sgrad+=CMath::max(A[i],C[i]);
00277 else if (A[i]!=C[i])
00278 {
00279 hinge_point[num_hinge]=(D[i]-B[i])/(A[i]-C[i]);
00280 hinge_idx[num_hinge]=i;
00281 num_hinge++;
00282
00283 if (A[i]>C[i])
00284 sgrad+=C[i];
00285 if (A[i]<C[i])
00286 sgrad+=A[i];
00287 }
00288 }
00289
00290
00291
00292
00293
00294
00295
00296
00297
00298
00299 CMath::qsort_index(hinge_point, hinge_idx, num_hinge);
00300
00301
00302
00303 int32_t i=-1;
00304 while (i < num_hinge-1 && sgrad < 0)
00305 {
00306 i+=1;
00307
00308 if (A[hinge_idx[i]] > C[hinge_idx[i]])
00309 sgrad += A[hinge_idx[i]] - C[hinge_idx[i]];
00310 else
00311 sgrad += C[hinge_idx[i]] - A[hinge_idx[i]];
00312 }
00313
00314 alpha = hinge_point[i];
00315
00316 delete[] D;
00317 delete[] C;
00318 delete[] B;
00319 delete[] A;
00320
00321
00322 return alpha;
00323 }
00324
/** Compute the minimum-norm subgradient of the LPM objective.
 *
 * If there are vectors at the bound or zero w-components, the subgradient
 * is not unique and a small QP (solved via CPLEX) selects the minimizing
 * one; otherwise the gradient is unique and computed directly. The chosen
 * direction is written back into grad_w/grad_b.
 *
 * @param num_feat number of features
 * @param num_vec number of training vectors
 * @param num_active size of the active set (from find_active)
 * @param num_bound size of the at-bound set (from find_active)
 * @return directional derivative along the chosen direction
 */
float64_t CSubGradientLPM::compute_min_subgradient(
	int32_t num_feat, int32_t num_vec, int32_t num_active, int32_t num_bound)
{
	float64_t dir_deriv=0;
	solver->init(E_QP);

	if (zero_idx+num_bound > 0)
	{
		// grad_w currently holds the sign pattern of w (set in find_active);
		// subtract the active-set term to form the objective gradient.
		// NOTE(review): the else-branch below uses 'w' as the first source
		// vector instead of 'grad_w' — confirm this asymmetry is intended.
		CMath::add(grad_w, 1.0, grad_w, -1.0, sum_CXy_active, num_feat);
		grad_w[num_feat]= -sum_Cy_active;

		grad_b = -sum_Cy_active;

		// set up and solve the QP over the at-bound vectors and zero
		// components of w; result (the minimizing subgradient) lands in beta
		solver->setup_subgradientlpm_QP(C1, labels, (CSparseFeatures<float64_t>*) features, idx_bound, num_bound,
				w_zero, zero_idx,
				grad_w, num_feat+1,
				use_bias);

		solver->optimize(beta);

		// directional derivative: <beta, grad> plus the positive parts
		// contributed by the at-bound vectors
		dir_deriv = CMath::dot(beta, grad_w, num_feat);
		dir_deriv-=beta[num_feat]*sum_Cy_active;

		for (int32_t i=0; i<num_bound; i++)
		{
			float64_t val= C1*get_label(idx_bound[i])*(features->dense_dot(idx_bound[i], beta, num_feat)+ beta[num_feat]);
			dir_deriv += CMath::max(0.0, val);
		}

		// adopt the QP solution as the new descent direction
		for (int32_t i=0; i<num_feat; i++)
			grad_w[i]=beta[i];

		if (use_bias)
			grad_b=beta[num_feat];
	}
	else
	{
		// gradient is unique: subtract active-set term from the L1 sign
		// pattern; squared norm gives the directional derivative
		CMath::add(grad_w, 1.0, w, -1.0, sum_CXy_active, num_feat);
		grad_b = -sum_Cy_active;

		dir_deriv = CMath::dot(grad_w, grad_w, num_feat)+ grad_b*grad_b;
	}

	solver->cleanup();

	return dir_deriv;
}
00410
00411 float64_t CSubGradientLPM::compute_objective(int32_t num_feat, int32_t num_vec)
00412 {
00413 float64_t result= CMath::sum_abs(w, num_feat);
00414
00415 for (int32_t i=0; i<num_vec; i++)
00416 {
00417 if (proj[i]<1.0)
00418 result += C1 * (1.0-proj[i]);
00419 }
00420
00421 return result;
00422 }
00423
00424 void CSubGradientLPM::compute_projection(int32_t num_feat, int32_t num_vec)
00425 {
00426 for (int32_t i=0; i<num_vec; i++)
00427 proj[i]=get_label(i)*(features->dense_dot(i, w, num_feat) + bias);
00428 }
00429
/** Incrementally update the margin projections after a step of size alpha:
 * proj -= alpha * grad_proj, where grad_proj was cached by line_search().
 *
 * @param alpha step size returned by line_search()
 * @param num_vec number of training vectors
 */
void CSubGradientLPM::update_projection(float64_t alpha, int32_t num_vec)
{
	CMath::vec1_plus_scalar_times_vec2(proj,-alpha, grad_proj, num_vec);
}
00434
00435 void CSubGradientLPM::init(int32_t num_vec, int32_t num_feat)
00436 {
00437
00438 delete[] w;
00439 w=new float64_t[num_feat];
00440 w_dim=num_feat;
00441 for (int32_t i=0; i<num_feat; i++)
00442 w[i]=1.0;
00443
00444 bias=0;
00445 num_it_noimprovement=0;
00446 grad_b=0;
00447
00448 w_pos=new int32_t[num_feat];
00449 memset(w_pos,0,sizeof(int32_t)*num_feat);
00450
00451 w_zero=new int32_t[num_feat];
00452 memset(w_zero,0,sizeof(int32_t)*num_feat);
00453
00454 w_neg=new int32_t[num_feat];
00455 memset(w_neg,0,sizeof(int32_t)*num_feat);
00456
00457 grad_w=new float64_t[num_feat+1];
00458 memset(grad_w,0,sizeof(float64_t)*(num_feat+1));
00459
00460 sum_CXy_active=new float64_t[num_feat];
00461 memset(sum_CXy_active,0,sizeof(float64_t)*num_feat);
00462
00463 sum_Cy_active=0;
00464
00465 proj=new float64_t[num_vec];
00466 memset(proj,0,sizeof(float64_t)*num_vec);
00467
00468 tmp_proj=new float64_t[num_vec];
00469 memset(proj,0,sizeof(float64_t)*num_vec);
00470
00471 tmp_proj_idx=new int32_t[num_vec];
00472 memset(tmp_proj_idx,0,sizeof(int32_t)*num_vec);
00473
00474 grad_proj=new float64_t[num_vec];
00475 memset(grad_proj,0,sizeof(float64_t)*num_vec);
00476
00477 hinge_point=new float64_t[num_vec+num_feat];
00478 memset(hinge_point,0,sizeof(float64_t)*(num_vec+num_feat));
00479
00480 hinge_idx=new int32_t[num_vec+num_feat];
00481 memset(hinge_idx,0,sizeof(int32_t)*(num_vec+num_feat));
00482
00483 active=new uint8_t[num_vec];
00484 memset(active,0,sizeof(uint8_t)*num_vec);
00485
00486 old_active=new uint8_t[num_vec];
00487 memset(old_active,0,sizeof(uint8_t)*num_vec);
00488
00489 idx_bound=new int32_t[num_vec];
00490 memset(idx_bound,0,sizeof(int32_t)*num_vec);
00491
00492 idx_active=new int32_t[num_vec];
00493 memset(idx_active,0,sizeof(int32_t)*num_vec);
00494
00495 beta=new float64_t[num_feat+1+num_feat+num_vec];
00496 memset(beta,0,sizeof(float64_t)*num_feat+1+num_feat+num_vec);
00497
00498 solver=new CCplex();
00499 }
00500
00501 void CSubGradientLPM::cleanup()
00502 {
00503 delete[] hinge_idx;
00504 delete[] hinge_point;
00505 delete[] grad_proj;
00506 delete[] proj;
00507 delete[] tmp_proj;
00508 delete[] tmp_proj_idx;
00509 delete[] active;
00510 delete[] old_active;
00511 delete[] idx_bound;
00512 delete[] idx_active;
00513 delete[] sum_CXy_active;
00514 delete[] w_pos;
00515 delete[] w_zero;
00516 delete[] w_neg;
00517 delete[] grad_w;
00518 delete[] beta;
00519
00520 hinge_idx=NULL;
00521 hinge_point=NULL;
00522 grad_proj=NULL;
00523 proj=NULL;
00524 tmp_proj=NULL;
00525 tmp_proj_idx=NULL;
00526 active=NULL;
00527 old_active=NULL;
00528 idx_bound=NULL;
00529 idx_active=NULL;
00530 sum_CXy_active=NULL;
00531 w_pos=NULL;
00532 w_zero=NULL;
00533 w_neg=NULL;
00534 grad_w=NULL;
00535 beta=NULL;
00536
00537 delete solver;
00538 solver=NULL;
00539 }
00540
/** Train the LPM with projected subgradient descent.
 *
 * Each iteration: (1) select active/bound working sets via find_active(),
 * (2) update the cached active-set sums, (3) compute the minimum-norm
 * subgradient (QP via CPLEX when the subgradient is non-unique),
 * (4) perform an exact line search, (5) take the step and update the
 * cached projections. Terminates on the optimality check, user cancel
 * (CSignal) or the max-training-time limit.
 *
 * @param data optional training features; must support dot products
 * @return true on (claimed) success
 */
bool CSubGradientLPM::train(CFeatures* data)
{
	lpmtim=0;
	SG_INFO("C=%f epsilon=%f\n", C1, epsilon);
	ASSERT(labels);
	if (data)
	{
		if (!data->has_property(FP_DOT))
			SG_ERROR("Specified features are not of type CDotFeatures\n");
		set_features((CDotFeatures*) data);
	}
	ASSERT(features);

	int32_t num_iterations=0;
	int32_t num_train_labels=labels->get_num_labels();
	int32_t num_feat=features->get_dim_feature_space();
	int32_t num_vec=features->get_num_vectors();

	ASSERT(num_vec==num_train_labels);

	// allocate all working buffers (freed again in cleanup() below)
	init(num_vec, num_feat);

	int32_t num_active=0;
	int32_t num_bound=0;
	float64_t alpha=0;
	float64_t dir_deriv=0;
	float64_t obj=0;
	delta_active=num_vec;
	last_it_noimprovement=-1;

	// start with a loose epsilon; find_active tightens it towards 'epsilon'
	work_epsilon=0.99;
	autoselected_epsilon=work_epsilon;

	compute_projection(num_feat, num_vec);

	CTime time;
	float64_t loop_time=0;
	while (!(CSignal::cancel_computations()))
	{
		CTime t;
		delta_active=find_active(num_feat, num_vec, num_active, num_bound);

		update_active(num_feat, num_vec);

#ifdef DEBUG_SUBGRADIENTLPM
		SG_PRINT("==================================================\niteration: %d ", num_iterations);
		obj=compute_objective(num_feat, num_vec);
		SG_PRINT("objective:%.10f alpha: %.10f dir_deriv: %f num_bound: %d num_active: %d work_eps: %10.10f eps: %10.10f auto_eps: %10.10f time:%f\n",
				obj, alpha, dir_deriv, num_bound, num_active, work_epsilon, epsilon, autoselected_epsilon, loop_time);
#else
		SG_ABS_PROGRESS(work_epsilon, -CMath::log10(work_epsilon), -CMath::log10(0.99999999), -CMath::log10(epsilon), 6);
#endif

		// descent direction (written into grad_w/grad_b) + its derivative
		dir_deriv=compute_min_subgradient(num_feat, num_vec, num_active, num_bound);

		alpha=line_search(num_feat, num_vec);

		// periodically check the optimality conditions
		if (num_it_noimprovement==10 || num_bound<qpsize_max)
		{
			float64_t norm_grad=CMath::dot(grad_w, grad_w, num_feat) +
				grad_b*grad_b;

			SG_PRINT("CHECKING OPTIMALITY CONDITIONS: "
					"work_epsilon: %10.10f delta_active:%d alpha: %10.10f norm_grad: %10.10f a*norm_grad:%10.16f\n",
					work_epsilon, delta_active, alpha, norm_grad, CMath::abs(alpha*norm_grad));

			// converged: target precision reached, stable working set,
			// and a (near-)zero step along a (near-)zero gradient
			if (work_epsilon<=epsilon && delta_active==0 && CMath::abs(alpha*norm_grad)<1e-6)
				break;
			else
				num_it_noimprovement=0;
		}

		// track consecutive iterations without improvement
		if ((dir_deriv<0 || alpha==0) && (work_epsilon<=epsilon && delta_active==0))
		{
			if (last_it_noimprovement==num_iterations-1)
			{
				SG_PRINT("no improvement...\n");
				num_it_noimprovement++;
			}
			else
				num_it_noimprovement=0;

			last_it_noimprovement=num_iterations;
		}

		// take the step: w -= alpha*grad_w, bias -= alpha*grad_b
		CMath::vec1_plus_scalar_times_vec2(w, -alpha, grad_w, num_feat);
		bias-=alpha*grad_b;

		update_projection(alpha, num_vec);

		t.stop();
		loop_time=t.time_diff_sec();
		num_iterations++;

		if (get_max_train_time()>0 && time.cur_time_diff()>get_max_train_time())
			break;
	}

	SG_INFO("converged after %d iterations\n", num_iterations);

	obj=compute_objective(num_feat, num_vec);
	SG_INFO("objective: %f alpha: %f dir_deriv: %f num_bound: %d num_active: %d\n",
			obj, alpha, dir_deriv, num_bound, num_active);

#ifdef DEBUG_SUBGRADIENTLPM
	CMath::display_vector(w, w_dim, "w");
	SG_PRINT("bias: %f\n", bias);
#endif
	SG_PRINT("solver time:%f s\n", lpmtim);

	cleanup();

	return true;
}
00667 #endif //USE_CPLEX