00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011 #include "lib/config.h"
00012
00013 #ifdef USE_SVMLIGHT
00014
00015 #include "lib/io.h"
00016 #include "lib/lapack.h"
00017 #include "lib/Signal.h"
00018 #include "lib/Mathematics.h"
00019 #include "regression/svr/SVRLight.h"
00020 #include "classifier/KernelMachine.h"
00021 #include "kernel/CombinedKernel.h"
00022
00023 #include <unistd.h>
00024
00025 #ifdef USE_CPLEX
00026 extern "C" {
00027 #include <ilcplex/cplex.h>
00028 }
00029 #endif
00030
00031 #include "base/Parallel.h"
00032
00033 #ifndef WIN32
00034 #include <pthread.h>
00035 #endif
00036
00037 using namespace shogun;
00038
00039 #ifndef DOXYGEN_SHOULD_SKIP_THIS
// Work unit handed to one pthread in the parallel update of the linear
// term lin[] (see update_linear_component_linadd_helper).
struct S_THREAD_PARAM
{
	float64_t* lin;        // shared output array: linear term, indexed by document id
	int32_t start, end;    // this thread processes positions [start, end) of active2dnum
	int32_t* active2dnum;  // -1 terminated list of active example indices
	int32_t* docs;         // document id per example
	CKernel* kernel;       // kernel with precomputed normal (compute_optimized)
	int32_t num_vectors;   // number of original (un-duplicated) training vectors
};
00049 #endif // DOXYGEN_SHOULD_SKIP_THIS
00050
// Construct an SVR-light trainer. C, kernel and labels are forwarded to the
// CSVMLight base; eps is the width of the regression tube (tube epsilon),
// not the optimizer's termination epsilon.
CSVRLight::CSVRLight(float64_t C, float64_t eps, CKernel* k, CLabels* lab)
: CSVMLight(C, k, lab)
{
	set_tube_epsilon(eps);
}
00056
// Default constructor: delegates entirely to CSVMLight; tube epsilon keeps
// whatever default the base class established.
CSVRLight::CSVRLight()
: CSVMLight()
{
}
00061
// Train the eps-SVR model: fill svmlight's learn_parm with regression
// settings, validate kernel/labels, run svr_learn(), then copy the solved
// model (alphas, support vectors, bias) into this CSVM.
// @param data optional training features; if given, the kernel is
//             (re-)initialized on them and the label count must match
// @return true on success; SG_ERROR is raised on missing kernel/labels
bool CSVRLight::train(CFeatures* data)
{
	verbosity=1;
	init_margin=0.15;
	init_iter=500;
	precision_violations=0;
	opt_precision=DEF_PRECISION;

	// svmlight parameter block: biased hyperplane, no transduction/LOO,
	// QP subproblem size from get_qpsize(), cost asymmetry C2/C1.
	strcpy (learn_parm->predfile, "");
	learn_parm->biased_hyperplane=1;
	learn_parm->sharedslack=0;
	learn_parm->remove_inconsistent=0;
	learn_parm->skip_final_opt_check=1;
	learn_parm->svm_maxqpsize=get_qpsize();
	learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize-1;
	learn_parm->maxiter=100000;
	learn_parm->svm_iter_to_shrink=100;
	learn_parm->svm_c=get_C1();
	learn_parm->transduction_posratio=0.33;
	learn_parm->svm_costratio=get_C2()/get_C1();
	learn_parm->svm_costratio_unlab=1.0;
	learn_parm->svm_unlabbound=1E-5;
	learn_parm->epsilon_crit=epsilon;  // termination criterion (not the tube)
	learn_parm->epsilon_a=1E-15;
	learn_parm->compute_loo=0;
	learn_parm->rho=1.0;
	learn_parm->xa_depth=0;

	if (!kernel)
	{
		SG_ERROR( "SVR_light can not proceed without kernel!\n");
		return false ;
	}

	if (!labels)
	{
		SG_ERROR( "SVR_light can not proceed without labels!\n");
		return false;
	}

	if (data)
	{
		if (labels->get_num_labels() != data->get_num_vectors())
			SG_ERROR("Number of training vectors does not match number of labels\n");
		kernel->init(data, data);
	}

	// In linadd mode the kernel accumulates a "normal" vector; start clean.
	if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
		kernel->clear_normal();

	SG_DEBUG( "qpsize = %i\n", learn_parm->svm_maxqpsize) ;
	SG_DEBUG( "epsilon = %1.1e\n", learn_parm->epsilon_crit) ;
	SG_DEBUG( "kernel->has_property(KP_LINADD) = %i\n", kernel->has_property(KP_LINADD)) ;
	SG_DEBUG( "kernel->has_property(KP_KERNCOMBINATION) = %i\n", kernel->has_property(KP_KERNCOMBINATION)) ;
	SG_DEBUG( "get_linadd_enabled() = %i\n", get_linadd_enabled()) ;
	SG_DEBUG( "kernel->get_num_subkernels() = %i\n", kernel->get_num_subkernels()) ;

	// Kernel cache is pointless for custom kernels and for linadd (where
	// single-example evaluation via compute_optimized is used instead).
	use_kernel_cache = !((kernel->get_kernel_type() == K_CUSTOM) ||
			(get_linadd_enabled() && kernel->has_property(KP_LINADD)));

	SG_DEBUG( "use_kernel_cache = %i\n", use_kernel_cache) ;

	svr_learn();

	// svmlight stores alphas/supvecs starting at index 1; sv_num counts
	// the dummy slot 0, hence the -1 / +1 shifts below.
	create_new_model(model->sv_num-1);
	set_bias(-model->b);
	for (int32_t i=0; i<model->sv_num-1; i++)
	{
		set_alpha(i, model->alpha[i+1]);
		set_support_vector(i, model->supvec[i+1]);
	}

	if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
		kernel->clear_normal() ;

	return true ;
}
00143
00144 void CSVRLight::svr_learn()
00145 {
00146 int32_t *inconsistent, i, j;
00147 int32_t inconsistentnum;
00148 int32_t upsupvecnum;
00149 float64_t maxdiff, *lin, *c, *a;
00150 int32_t runtime_start,runtime_end;
00151 int32_t iterations;
00152 float64_t *xi_fullset;
00153 float64_t *a_fullset;
00154 TIMING timing_profile;
00155 SHRINK_STATE shrink_state;
00156 int32_t* label;
00157 int32_t* docs;
00158
00159 ASSERT(labels);
00160 int32_t totdoc=labels->get_num_labels();
00161 num_vectors=totdoc;
00162
00163
00164 docs=new int32_t[2*totdoc];
00165 label=new int32_t[2*totdoc];
00166 c = new float64_t[2*totdoc];
00167
00168 for(i=0;i<totdoc;i++) {
00169 docs[i]=i;
00170 j=2*totdoc-1-i;
00171 label[i]=+1;
00172 c[i]=labels->get_label(i);
00173 docs[j]=j;
00174 label[j]=-1;
00175 c[j]=labels->get_label(i);
00176 }
00177 totdoc*=2;
00178
00179
00180 kernel->resize_kernel_cache( kernel->get_cache_size(), true);
00181
00182 if (kernel->get_kernel_type() == K_COMBINED)
00183 {
00184 CCombinedKernel* k = (CCombinedKernel*) kernel;
00185 CKernel* kn = k->get_first_kernel();
00186
00187 while (kn)
00188 {
00189 kn->resize_kernel_cache( kernel->get_cache_size(), true);
00190 SG_UNREF(kn);
00191 kn = k->get_next_kernel();
00192 }
00193 }
00194
00195 runtime_start=get_runtime();
00196 timing_profile.time_kernel=0;
00197 timing_profile.time_opti=0;
00198 timing_profile.time_shrink=0;
00199 timing_profile.time_update=0;
00200 timing_profile.time_model=0;
00201 timing_profile.time_check=0;
00202 timing_profile.time_select=0;
00203
00204 delete[] W;
00205 W=NULL;
00206
00207 if (kernel->has_property(KP_KERNCOMBINATION) && callback)
00208 {
00209 W = new float64_t[totdoc*kernel->get_num_subkernels()];
00210 for (i=0; i<totdoc*kernel->get_num_subkernels(); i++)
00211 W[i]=0;
00212 }
00213
00214
00215 if((learn_parm->svm_newvarsinqp < 2)
00216 || (learn_parm->svm_newvarsinqp > learn_parm->svm_maxqpsize)) {
00217 learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize;
00218 }
00219
00220 init_shrink_state(&shrink_state,totdoc,(int32_t)MAXSHRINK);
00221
00222 inconsistent = new int32_t[totdoc];
00223 a = new float64_t[totdoc];
00224 a_fullset = new float64_t[totdoc];
00225 xi_fullset = new float64_t[totdoc];
00226 lin = new float64_t[totdoc];
00227 learn_parm->svm_cost = new float64_t[totdoc];
00228 if (m_linear_term_len > 0)
00229 learn_parm->eps=get_linear_term_array();
00230 else
00231 {
00232 learn_parm->eps=new float64_t[totdoc];
00233 CMath::fill_vector(learn_parm->eps, totdoc, tube_epsilon);
00234 }
00235
00236 delete[] model->supvec;
00237 delete[] model->alpha;
00238 delete[] model->index;
00239 model->supvec = new int32_t[totdoc+2];
00240 model->alpha = new float64_t[totdoc+2];
00241 model->index = new int32_t[totdoc+2];
00242
00243 model->at_upper_bound=0;
00244 model->b=0;
00245 model->supvec[0]=0;
00246 model->alpha[0]=0;
00247 model->totdoc=totdoc;
00248
00249 model->kernel=kernel;
00250
00251 model->sv_num=1;
00252 model->loo_error=-1;
00253 model->loo_recall=-1;
00254 model->loo_precision=-1;
00255 model->xa_error=-1;
00256 model->xa_recall=-1;
00257 model->xa_precision=-1;
00258 inconsistentnum=0;
00259
00260 for(i=0;i<totdoc;i++) {
00261 inconsistent[i]=0;
00262 a[i]=0;
00263 lin[i]=0;
00264
00265 if(label[i] > 0) {
00266 learn_parm->svm_cost[i]=learn_parm->svm_c*learn_parm->svm_costratio*
00267 fabs((float64_t)label[i]);
00268 }
00269 else if(label[i] < 0) {
00270 learn_parm->svm_cost[i]=learn_parm->svm_c*fabs((float64_t)label[i]);
00271 }
00272 else
00273 ASSERT(false);
00274 }
00275
00276 if(verbosity==1) {
00277 SG_DEBUG( "Optimizing...\n");
00278 }
00279
00280
00281 SG_DEBUG( "num_train: %d\n", totdoc);
00282 iterations=optimize_to_convergence(docs,label,totdoc,
00283 &shrink_state,inconsistent,a,lin,
00284 c,&timing_profile,
00285 &maxdiff,(int32_t)-1,
00286 (int32_t)1);
00287
00288
00289 if(verbosity>=1) {
00290 SG_DONE();
00291 SG_INFO("(%ld iterations)\n",iterations);
00292 SG_INFO( "Optimization finished (maxdiff=%.8f).\n",maxdiff);
00293 SG_INFO( "obj = %.16f, rho = %.16f\n",get_objective(),model->b);
00294
00295 runtime_end=get_runtime();
00296 upsupvecnum=0;
00297
00298 SG_DEBUG( "num sv: %d\n", model->sv_num);
00299 for(i=1;i<model->sv_num;i++)
00300 {
00301 if(fabs(model->alpha[i]) >=
00302 (learn_parm->svm_cost[model->supvec[i]]-
00303 learn_parm->epsilon_a))
00304 upsupvecnum++;
00305 }
00306 SG_INFO( "Number of SV: %ld (including %ld at upper bound)\n",
00307 model->sv_num-1,upsupvecnum);
00308 }
00309
00310
00311
00312 for(i=1;i<model->sv_num;i++) {
00313 j=model->supvec[i];
00314 if(j >= (totdoc/2)) {
00315 j=totdoc-j-1;
00316 }
00317 model->supvec[i]=j;
00318 }
00319
00320 shrink_state_cleanup(&shrink_state);
00321 delete[] label;
00322 delete[] inconsistent;
00323 delete[] c;
00324 delete[] a;
00325 delete[] a_fullset;
00326 delete[] xi_fullset;
00327 delete[] lin;
00328 delete[] learn_parm->svm_cost;
00329 delete[] docs;
00330 }
00331
00332 float64_t CSVRLight::compute_objective_function(
00333 float64_t *a, float64_t *lin, float64_t *c, float64_t* eps, int32_t *label,
00334 int32_t totdoc)
00335 {
00336
00337 float64_t criterion=0;
00338
00339 for(int32_t i=0;i<totdoc;i++)
00340 criterion+=(eps[i]-(float64_t)label[i]*c[i])*a[i]+0.5*a[i]*label[i]*lin[i];
00341
00342
00343
00344
00345
00346
00347
00348
00349
00350
00351
00352
00353 return(criterion);
00354 }
00355
00356 void* CSVRLight::update_linear_component_linadd_helper(void *params_)
00357 {
00358 S_THREAD_PARAM * params = (S_THREAD_PARAM*) params_ ;
00359
00360 int32_t jj=0, j=0 ;
00361
00362 for(jj=params->start;(jj<params->end) && (j=params->active2dnum[jj])>=0;jj++)
00363 params->lin[j]+=params->kernel->compute_optimized(CSVRLight::regression_fix_index2(params->docs[j], params->num_vectors));
00364
00365 return NULL ;
00366 }
00367
00368
// Update the linear term lin[] after the alphas listed in working2dnum
// changed, using whichever strategy fits the kernel:
//  - linadd: fold alpha deltas into the kernel's normal vector, then
//    re-evaluate compute_optimized per active example (optionally threaded);
//  - MKL (callback set): delegate to the MKL-specific updaters;
//  - otherwise: classic kernel-row updates via the kernel cache.
void CSVRLight::update_linear_component(
	int32_t* docs, int32_t* label, int32_t *active2dnum, float64_t *a,
	float64_t *a_old, int32_t *working2dnum, int32_t totdoc, float64_t *lin,
	float64_t *aicache, float64_t* c)
{
	register int32_t i=0,ii=0,j=0,jj=0;

	if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
	{
		if (callback)
		{
			update_linear_component_mkl_linadd(docs, label, active2dnum, a, a_old, working2dnum,
					totdoc, lin, aicache, c) ;
		}
		else
		{
			kernel->clear_normal();

			// Accumulate delta_alpha * y into the kernel normal; count how
			// many alphas actually moved.
			int32_t num_working=0;
			for(ii=0;(i=working2dnum[ii])>=0;ii++) {
				if(a[i] != a_old[i]) {
					kernel->add_to_normal(regression_fix_index(docs[i]), (a[i]-a_old[i])*(float64_t)label[i]);
					num_working++;
				}
			}

			if (num_working>0)
			{
				if (parallel->get_num_threads() < 2)
				{
					for(jj=0;(j=active2dnum[jj])>=0;jj++) {
						lin[j]+=kernel->compute_optimized(regression_fix_index(docs[j]));
					}
				}
#ifndef WIN32
				else
				{
					// Split the active list into num_threads chunks of size
					// step; threads take the first num_threads-1 chunks, the
					// calling thread sweeps the remainder (from the last
					// spawned chunk's end to the -1 terminator).
					int32_t num_elem = 0 ;
					for(jj=0;(j=active2dnum[jj])>=0;jj++) num_elem++ ;

					pthread_t* threads = new pthread_t[parallel->get_num_threads()-1] ;
					S_THREAD_PARAM* params = new S_THREAD_PARAM[parallel->get_num_threads()-1] ;
					int32_t start = 0 ;
					int32_t step = num_elem/parallel->get_num_threads() ;
					int32_t end = step ;

					for (int32_t t=0; t<parallel->get_num_threads()-1; t++)
					{
						params[t].kernel = kernel ;
						params[t].lin = lin ;
						params[t].docs = docs ;
						params[t].active2dnum=active2dnum ;
						params[t].start = start ;
						params[t].end = end ;
						params[t].num_vectors=num_vectors ;

						start=end ;
						end+=step ;
						pthread_create(&threads[t], NULL, update_linear_component_linadd_helper, (void*)&params[t]) ;
					}

					for(jj=params[parallel->get_num_threads()-2].end;(j=active2dnum[jj])>=0;jj++) {
						lin[j]+=kernel->compute_optimized(regression_fix_index(docs[j]));
					}
					void* ret;
					for (int32_t t=0; t<parallel->get_num_threads()-1; t++)
						pthread_join(threads[t], &ret) ;

					delete[] params;
					delete[] threads;
				}
#endif
			}
		}
	}
	else
	{
		if (callback)
		{
			update_linear_component_mkl(docs, label, active2dnum,
					a, a_old, working2dnum, totdoc, lin, aicache, c) ;
		}
		else {
			// Cache path: for each changed alpha, fetch kernel row i and add
			// delta_alpha_i * y_i * K(i,j) for every active j.
			for(jj=0;(i=working2dnum[jj])>=0;jj++) {
				if(a[i] != a_old[i]) {
					kernel->get_kernel_row(i,active2dnum,aicache);
					for(ii=0;(j=active2dnum[ii])>=0;ii++)
						lin[j]+=(a[i]-a_old[i])*aicache[j]*(float64_t)label[i];
				}
			}
		}
	}
}
00466
// MKL variant of the linear-term update: maintains W (per-subkernel linear
// terms, laid out so W[j*num_kernels+n] is subkernel n's term for example j)
// and then invokes the MKL callback to recombine subkernel weights.
// Two strategies: iterate subkernels of a CCombinedKernel directly, or —
// for other kernels — probe each subkernel by setting its weight to 1 and
// all others to 0, restoring the original weights afterwards.
void CSVRLight::update_linear_component_mkl(
	int32_t* docs, int32_t* label, int32_t *active2dnum, float64_t *a,
	float64_t *a_old, int32_t *working2dnum, int32_t totdoc, float64_t *lin,
	float64_t *aicache, float64_t* c)
{
	int32_t num = totdoc;
	int32_t num_weights = -1;
	int32_t num_kernels = kernel->get_num_subkernels() ;
	const float64_t* old_beta = kernel->get_subkernel_weights(num_weights);

	ASSERT(num_weights==num_kernels);

	if ((kernel->get_kernel_type()==K_COMBINED) &&
			(!((CCombinedKernel*)kernel)->get_append_subkernel_weights()))
	{
		// Walk each subkernel; for every changed alpha add its delta to the
		// corresponding column of W.
		CCombinedKernel* k = (CCombinedKernel*) kernel;
		CKernel* kn = k->get_first_kernel() ;
		int32_t n = 0, i, j ;

		while (kn!=NULL)
		{
			for(i=0;i<num;i++)
			{
				if(a[i] != a_old[i])
				{
					kn->get_kernel_row(i,NULL,aicache, true);
					for(j=0;j<num;j++)
						// NOTE(review): this path indexes aicache with
						// regression_fix_index(j) while the non-MKL path
						// (and compute_kernel below) use plain j — confirm
						// the remap is intended here.
						W[j*num_kernels+n]+=(a[i]-a_old[i])*aicache[regression_fix_index(j)]*(float64_t)label[i];
				}
			}
			SG_UNREF(kn);
			kn = k->get_next_kernel();
			n++ ;
		}
	}
	else
	{
		// Generic kernel: isolate subkernel n by setting its weight to 1
		// (all others 0), evaluate, then restore the saved weights.
		float64_t* w_backup = new float64_t[num_kernels] ;
		float64_t* w1 = new float64_t[num_kernels] ;

		for (int32_t i=0; i<num_kernels; i++)
		{
			w_backup[i] = old_beta[i] ;
			w1[i]=0.0 ;
		}
		for (int32_t n=0; n<num_kernels; n++)
		{
			w1[n]=1.0 ;
			kernel->set_subkernel_weights(w1, num_weights) ;

			for(int32_t i=0;i<num;i++)
			{
				if(a[i] != a_old[i])
				{
					for(int32_t j=0;j<num;j++)
						W[j*num_kernels+n]+=(a[i]-a_old[i])*compute_kernel(i,j)*(float64_t)label[i];
				}
			}
			w1[n]=0.0 ;
		}

		kernel->set_subkernel_weights(w_backup,num_weights) ;

		delete[] w_backup ;
		delete[] w1 ;
	}

	call_mkl_callback(a, label, lin, c, totdoc);
}
00538
00539
// MKL + linadd variant: temporarily sets all subkernel weights to 1, folds
// the alpha deltas into the kernel normal, reads back the per-subkernel
// contributions into W via compute_by_subkernel, restores the original
// weights, and finally triggers the MKL callback. The statement order
// (set weights -> clear normal -> add deltas -> read W -> restore) is
// essential; do not reorder.
void CSVRLight::update_linear_component_mkl_linadd(
	int32_t* docs, int32_t* label, int32_t *active2dnum, float64_t *a,
	float64_t *a_old, int32_t *working2dnum, int32_t totdoc, float64_t *lin,
	float64_t *aicache, float64_t* c)
{
	int32_t num = totdoc;
	int32_t num_weights = -1;
	int32_t num_kernels = kernel->get_num_subkernels() ;
	const float64_t* old_beta = kernel->get_subkernel_weights(num_weights);

	ASSERT(num_weights==num_kernels);

	float64_t* w_backup=new float64_t[num_kernels];
	float64_t* w1=new float64_t[num_kernels];

	// Save current weights, then switch every subkernel weight to 1 so the
	// normal accumulates unweighted per-subkernel contributions.
	for (int32_t i=0; i<num_kernels; i++)
	{
		w_backup[i] = old_beta[i] ;
		w1[i]=1.0 ;
	}

	kernel->set_subkernel_weights(w1, num_weights) ;

	kernel->clear_normal();
	for(int32_t ii=0, i=0;(i=working2dnum[ii])>=0;ii++) {
		if(a[i] != a_old[i]) {
			kernel->add_to_normal(regression_fix_index(docs[i]), (a[i]-a_old[i])*(float64_t)label[i]);
		}
	}

	// W[i*num_kernels .. +num_kernels) receives subkernel outputs for i.
	for (int32_t i=0; i<num; i++)
		kernel->compute_by_subkernel(i,&W[i*num_kernels]) ;

	kernel->set_subkernel_weights(w_backup,num_weights) ;

	delete[] w_backup ;
	delete[] w1 ;

	call_mkl_callback(a, label, lin, c, totdoc);
}
00586
// Hand the current MKL state to the registered callback and recompute lin[]
// with the (possibly updated) subkernel weights.
// W is interpreted column-major as a num_kernels x num matrix, i.e.
// W[i*num_kernels+d] = subkernel d's linear term for example i — both the
// LAPACK and the fallback branches rely on this layout.
void CSVRLight::call_mkl_callback(float64_t* a, int32_t* label, float64_t* lin, float64_t* c, int32_t totdoc)
{
	int32_t num = totdoc;
	int32_t num_kernels = kernel->get_num_subkernels() ;
	int nk = (int) num_kernels;
	float64_t sumalpha = 0;
	float64_t* sumw=new float64_t[num_kernels];

	// Constant part of the objective: -sum_i a_i (eps_i - y_i c_i).
	for (int32_t i=0; i<num; i++)
		sumalpha-=a[i]*(learn_parm->eps[i]-label[i]*c[i]);

#ifdef HAVE_LAPACK
	// sumw = 0.5 * W * (a .* y)   (nk x num times num-vector)
	double* alphay = new double[num];
	for (int32_t i=0; i<num; i++)
		alphay[i]=a[i]*label[i];

	for (int32_t i=0; i<num_kernels; i++)
		sumw[i]=0;

	cblas_dgemv(CblasColMajor, CblasNoTrans, nk, (int) num, 0.5, (double*) W,
		nk, (double*) alphay, 1, 1.0, (double*) sumw, 1);

	delete[] alphay;
#else
	// Same product, computed by hand.
	for (int32_t d=0; d<num_kernels; d++)
	{
		sumw[d]=0;
		for(int32_t i=0; i<num; i++)
			sumw[d] += 0.5*a[i]*label[i]*W[i*num_kernels+d];
	}
#endif

	if (callback)
		mkl_converged=callback(mkl, sumw, sumalpha);

	const float64_t* new_beta = kernel->get_subkernel_weights(num_kernels);

#ifdef HAVE_LAPACK
	// lin = W^T * new_beta  (num-vector of recombined linear terms)
	cblas_dgemv(CblasColMajor, CblasTrans, nk, (int) num, 1.0, (double*) W,
		nk, (double*) new_beta, 1, 0.0, (double*) lin, 1);
#else
	for(int32_t i=0; i<num; i++)
		lin[i]=0 ;
	for (int32_t d=0; d<num_kernels; d++)
		if (new_beta[d]!=0)
			for(int32_t i=0; i<num; i++)
				lin[i] += new_beta[d]*W[i*num_kernels+d] ;
#endif

	delete[] sumw;
}
00640
00641
00642 void CSVRLight::reactivate_inactive_examples(
00643 int32_t* label, float64_t *a, SHRINK_STATE *shrink_state, float64_t *lin,
00644 float64_t *c, int32_t totdoc, int32_t iteration, int32_t *inconsistent,
00645 int32_t* docs, float64_t *aicache, float64_t *maxdiff)
00646
00647
00648
00649 {
00650 register int32_t i=0,j,ii=0,jj,t,*changed2dnum,*inactive2dnum;
00651 int32_t *changed,*inactive;
00652 register float64_t *a_old,dist;
00653 float64_t ex_c,target;
00654
00655 if (kernel->has_property(KP_LINADD) && get_linadd_enabled()) {
00656 a_old=shrink_state->last_a;
00657
00658 kernel->clear_normal();
00659 int32_t num_modified=0;
00660 for(i=0;i<totdoc;i++) {
00661 if(a[i] != a_old[i]) {
00662 kernel->add_to_normal(regression_fix_index(docs[i]), ((a[i]-a_old[i])*(float64_t)label[i]));
00663 a_old[i]=a[i];
00664 num_modified++;
00665 }
00666 }
00667
00668 if (num_modified>0)
00669 {
00670 for(i=0;i<totdoc;i++) {
00671 if(!shrink_state->active[i]) {
00672 lin[i]=shrink_state->last_lin[i]+kernel->compute_optimized(regression_fix_index(docs[i]));
00673 }
00674 shrink_state->last_lin[i]=lin[i];
00675 }
00676 }
00677 }
00678 else
00679 {
00680 changed=new int32_t[totdoc];
00681 changed2dnum=new int32_t[totdoc+11];
00682 inactive=new int32_t[totdoc];
00683 inactive2dnum=new int32_t[totdoc+11];
00684 for(t=shrink_state->deactnum-1;(t>=0) && shrink_state->a_history[t];t--) {
00685 if(verbosity>=2) {
00686 SG_INFO( "%ld..",t);
00687 }
00688 a_old=shrink_state->a_history[t];
00689 for(i=0;i<totdoc;i++) {
00690 inactive[i]=((!shrink_state->active[i])
00691 && (shrink_state->inactive_since[i] == t));
00692 changed[i]= (a[i] != a_old[i]);
00693 }
00694 compute_index(inactive,totdoc,inactive2dnum);
00695 compute_index(changed,totdoc,changed2dnum);
00696
00697 for(ii=0;(i=changed2dnum[ii])>=0;ii++) {
00698 CKernelMachine::kernel->get_kernel_row(i,inactive2dnum,aicache);
00699 for(jj=0;(j=inactive2dnum[jj])>=0;jj++)
00700 lin[j]+=(a[i]-a_old[i])*aicache[j]*(float64_t)label[i];
00701 }
00702 }
00703 delete[] changed;
00704 delete[] changed2dnum;
00705 delete[] inactive;
00706 delete[] inactive2dnum;
00707 }
00708
00709 (*maxdiff)=0;
00710 for(i=0;i<totdoc;i++) {
00711 shrink_state->inactive_since[i]=shrink_state->deactnum-1;
00712 if(!inconsistent[i]) {
00713 dist=(lin[i]-model->b)*(float64_t)label[i];
00714 target=-(learn_parm->eps[i]-(float64_t)label[i]*c[i]);
00715 ex_c=learn_parm->svm_cost[i]-learn_parm->epsilon_a;
00716 if((a[i]>learn_parm->epsilon_a) && (dist > target)) {
00717 if((dist-target)>(*maxdiff))
00718 (*maxdiff)=dist-target;
00719 }
00720 else if((a[i]<ex_c) && (dist < target)) {
00721 if((target-dist)>(*maxdiff))
00722 (*maxdiff)=target-dist;
00723 }
00724 if((a[i]>(0+learn_parm->epsilon_a))
00725 && (a[i]<ex_c)) {
00726 shrink_state->active[i]=1;
00727 }
00728 else if((a[i]<=(0+learn_parm->epsilon_a)) && (dist < (target+learn_parm->epsilon_shrink))) {
00729 shrink_state->active[i]=1;
00730 }
00731 else if((a[i]>=ex_c)
00732 && (dist > (target-learn_parm->epsilon_shrink))) {
00733 shrink_state->active[i]=1;
00734 }
00735 else if(learn_parm->sharedslack) {
00736 shrink_state->active[i]=1;
00737 }
00738 }
00739 }
00740 if (use_kernel_cache) {
00741 for(i=0;i<totdoc;i++) {
00742 (shrink_state->a_history[shrink_state->deactnum-1])[i]=a[i];
00743 }
00744 for(t=shrink_state->deactnum-2;(t>=0) && shrink_state->a_history[t];t--) {
00745 delete[] shrink_state->a_history[t];
00746 shrink_state->a_history[t]=0;
00747 }
00748 }
00749 }
00750 #endif //USE_SVMLIGHT