SHOGUN  v3.0.0
SVMLight.cpp
1 /***********************************************************************/
2 /* */
3 /* SVMLight.cpp */
4 /* */
5 /* Author: Thorsten Joachims */
6 /* Date: 19.07.99 */
7 /* */
8 /* Copyright (c) 1999 Universitaet Dortmund - All rights reserved */
9 /* */
10 /* This software is available for non-commercial use only. It must */
11 /* not be modified and distributed without prior permission of the */
12 /* author. The author is not responsible for implications from the */
13 /* use of this software. */
14 /* */
15 /* THIS INCLUDES THE FOLLOWING ADDITIONS */
16 /* Generic Kernel Interfacing: Soeren Sonnenburg */
17 /* Parallelizations: Soeren Sonnenburg */
18 /* Multiple Kernel Learning: Gunnar Raetsch, Soeren Sonnenburg, */
19 /* Alexander Zien, Marius Kloft, Chen Guohua */
20 /* Linadd Speedup: Gunnar Raetsch, Soeren Sonnenburg */
21 /* */
22 /***********************************************************************/
23 #include <shogun/lib/config.h>
24 
25 #ifdef USE_SVMLIGHT
26 
27 #include <shogun/io/SGIO.h>
28 #include <shogun/lib/Signal.h>
29 #include <shogun/mathematics/Math.h>
30 #include <shogun/lib/Time.h>
31 #include <shogun/mathematics/lapack.h>
32 
34 #include <shogun/lib/external/pr_loqo.h>
35 
36 #include <shogun/kernel/Kernel.h>
37 #include <shogun/kernel/CombinedKernel.h>
38 #include <shogun/classifier/svm/SVMLight.h>
39 
40 #include <unistd.h>
41 
42 #include <shogun/base/Parallel.h>
43 #include <shogun/labels/BinaryLabels.h>
44 
45 #ifdef HAVE_PTHREAD
46 #include <pthread.h>
47 #endif
48 
49 using namespace shogun;
50 
51 #ifndef DOXYGEN_SHOULD_SKIP_THIS
52 struct S_THREAD_PARAM_REACTIVATE_LINADD
53 {
54  CKernel* kernel;
55  float64_t* lin;
56  float64_t* last_lin;
57  int32_t* active;
58  int32_t* docs;
59  int32_t start;
60  int32_t end;
61 };
62 
63 struct S_THREAD_PARAM_SVMLIGHT
64 {
65  float64_t * lin ;
66  float64_t* W;
67  int32_t start, end;
68  int32_t * active2dnum ;
69  int32_t * docs ;
70  CKernel* kernel ;
71 };
72 
73 struct S_THREAD_PARAM_REACTIVATE_VANILLA
74 {
75  CKernel* kernel;
76  float64_t* lin;
77  float64_t* aicache;
78  float64_t* a;
79  float64_t* a_old;
80  int32_t* changed2dnum;
81  int32_t* inactive2dnum;
82  int32_t* label;
83  int32_t start;
84  int32_t end;
85 };
86 
87 struct S_THREAD_PARAM_KERNEL
88 {
89  float64_t *Kval ;
90  int32_t *KI, *KJ ;
91  int32_t start, end;
92  CSVMLight* svmlight;
93 };
94 
95 #endif // DOXYGEN_SHOULD_SKIP_THIS
96 
97 void* CSVMLight::update_linear_component_linadd_helper(void* p)
98 {
99  S_THREAD_PARAM_SVMLIGHT* params = (S_THREAD_PARAM_SVMLIGHT*) p;
100 
101  int32_t jj=0, j=0 ;
102 
103  for (jj=params->start;(jj<params->end) && (j=params->active2dnum[jj])>=0;jj++)
104  params->lin[j]+=params->kernel->compute_optimized(params->docs[j]);
105 
106  return NULL ;
107 }
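/* Note on the "linadd" speedup used above: for kernels with the KP_LINADD
   property the normal vector w = sum_i alpha_i y_i phi(x_i) is maintained
   explicitly (see add_to_normal() in update_linear_component() below), so
   refreshing the gradient cache costs one compute_optimized(docs[j]) call
   per active example instead of a full kernel row. Each helper thread
   handles the slice [start,end) of the -1 terminated active2dnum list. */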
108 
109 void* CSVMLight::compute_kernel_helper(void* p)
110 {
111  S_THREAD_PARAM_KERNEL* params = (S_THREAD_PARAM_KERNEL*) p;
112 
113  int32_t jj=0 ;
114  for (jj=params->start;jj<params->end;jj++)
115  params->Kval[jj]=params->svmlight->compute_kernel(params->KI[jj], params->KJ[jj]) ;
116 
117  return NULL ;
118 }
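/* Helper for compute_matrices_for_optimization_parallel(): each thread fills
   its [start,end) slice of the flattened list of (KI[jj],KJ[jj]) kernel
   evaluations into Kval. */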
119 
120 CSVMLight::CSVMLight()
121 : CSVM()
122 {
123  init();
124  set_kernel(NULL);
125 }
126 
127 CSVMLight::CSVMLight(float64_t C, CKernel* k, CLabels* lab)
128 : CSVM(C, k, lab)
129 {
130  init();
131 }
132 
133 void CSVMLight::init()
134 {
135  //optimizer settings
136  primal=NULL;
137  dual=NULL;
138  init_margin=0.15;
139  init_iter=500;
140  precision_violations=0;
141  model_b=0;
142  verbosity=1;
143  opt_precision=DEF_PRECISION;
144 
145  // svm variables
146  W=NULL;
147  model=SG_MALLOC(MODEL, 1);
148  learn_parm=SG_MALLOC(LEARN_PARM, 1);
149  model->supvec=NULL;
150  model->alpha=NULL;
151  model->index=NULL;
152 
153  // MKL stuff
154  mymaxdiff=1 ;
155  mkl_converged=false;
156 }
157 
158 CSVMLight::~CSVMLight()
159 {
160 
161  SG_FREE(model->supvec);
162  SG_FREE(model->alpha);
163  SG_FREE(model->index);
164  SG_FREE(model);
165  SG_FREE(learn_parm);
166 
167  // MKL stuff
168  SG_FREE(W);
169 
170  // optimizer variables
171  SG_FREE(dual);
172  SG_FREE(primal);
173 }
174 
175 bool CSVMLight::train_machine(CFeatures* data)
176 {
177  //certain setup params
178  mkl_converged=false;
179  verbosity=1 ;
180  init_margin=0.15;
181  init_iter=500;
182  precision_violations=0;
183  opt_precision=DEF_PRECISION;
184 
185  strcpy (learn_parm->predfile, "");
186  learn_parm->biased_hyperplane= get_bias_enabled() ? 1 : 0;
187  learn_parm->sharedslack=0;
188  learn_parm->remove_inconsistent=0;
189  learn_parm->skip_final_opt_check=0;
190  learn_parm->svm_maxqpsize=get_qpsize();
191  learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize-1;
192  learn_parm->maxiter=100000;
193  learn_parm->svm_iter_to_shrink=100;
194  learn_parm->svm_c=C1;
195  learn_parm->transduction_posratio=0.33;
196  learn_parm->svm_costratio=C2/C1;
197  learn_parm->svm_costratio_unlab=1.0;
198  learn_parm->svm_unlabbound=1E-5;
199  learn_parm->epsilon_crit=epsilon; // GU: better decrease it ... ??
200  learn_parm->epsilon_a=1E-15;
201  learn_parm->compute_loo=0;
202  learn_parm->rho=1.0;
203  learn_parm->xa_depth=0;
204 
205  if (!kernel)
206  SG_ERROR("SVM_light can not proceed without kernel!\n")
207 
208  if (data)
209  {
210  if (m_labels->get_num_labels() != data->get_num_vectors())
211  {
212  SG_ERROR("%s::train_machine(): Number of training vectors (%d) does"
213  " not match number of labels (%d)\n", get_name(),
214  data->get_num_vectors(), m_labels->get_num_labels());
215  }
216  kernel->init(data, data);
217  }
218 
219  if (!kernel->has_features())
220  SG_ERROR("SVM_light can not proceed without initialized kernel!\n")
221 
225 
226  // in case of LINADD enabled kernels cleanup!
227  if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
228  kernel->clear_normal() ;
229 
230  // output some info
231  SG_DEBUG("threads = %i\n", parallel->get_num_threads())
232  SG_DEBUG("qpsize = %i\n", learn_parm->svm_maxqpsize)
233  SG_DEBUG("epsilon = %1.1e\n", learn_parm->epsilon_crit)
234  SG_DEBUG("kernel->has_property(KP_LINADD) = %i\n", kernel->has_property(KP_LINADD))
235  SG_DEBUG("kernel->has_property(KP_KERNCOMBINATION) = %i\n", kernel->has_property(KP_KERNCOMBINATION))
236  SG_DEBUG("kernel->has_property(KP_BATCHEVALUATION) = %i\n", kernel->has_property(KP_BATCHEVALUATION))
237  SG_DEBUG("kernel->get_optimization_type() = %s\n", kernel->get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" : "SLOWBUTMEMEFFICIENT" )
238  SG_DEBUG("get_solver_type() = %i\n", get_solver_type())
239  SG_DEBUG("get_linadd_enabled() = %i\n", get_linadd_enabled())
240  SG_DEBUG("get_batch_computation_enabled() = %i\n", get_batch_computation_enabled())
241  SG_DEBUG("kernel->get_num_subkernels() = %i\n", kernel->get_num_subkernels())
242 
243  use_kernel_cache = !((kernel->get_kernel_type() == K_CUSTOM) ||
244  (get_linadd_enabled() && kernel->has_property(KP_LINADD)));
245 
246  SG_DEBUG("use_kernel_cache = %i\n", use_kernel_cache)
247 
248  if (kernel->get_kernel_type() == K_COMBINED)
249  {
250 
251  for (index_t k_idx=0; k_idx<((CCombinedKernel*) kernel)->get_num_kernels(); k_idx++)
252  {
253  CKernel* kn = ((CCombinedKernel*) kernel)->get_kernel(k_idx);
254  // allocate kernel cache but clean up beforehand
255  kn->resize_kernel_cache(kn->get_cache_size());
256  SG_UNREF(kn);
257  }
258  }
259 
260  kernel->kernel_cache_reset_lru();
261 
262  // train the svm
263  svm_learn();
264 
265  // brain damaged svm light work around
266  create_new_model(model->sv_num-1);
267  set_bias(-model->b);
268  for (int32_t i=0; i<model->sv_num-1; i++)
269  {
270  set_alpha(i, model->alpha[i+1]);
271  set_support_vector(i, model->supvec[i+1]);
272  }
273 
274  // in case of LINADD enabled kernels cleanup!
275  if (kernel->has_property(KP_LINADD) && get_linadd_enabled())
276  {
277  kernel->clear_normal() ;
278  kernel->delete_optimization() ;
279  }
280 
281  if (use_kernel_cache)
282  kernel->kernel_cache_cleanup();
283 
284  return true ;
285 }
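/* Minimal usage sketch (assumes the Shogun 3.0 API as used in this file;
   'k' is an initialized CKernel* and 'lab' a CBinaryLabels*):

     CSVMLight* svm = new CSVMLight(1.0, k, lab); // C=1.0
     svm->train();                  // dispatches to train_machine() above
     CLabels* out = svm->apply();   // classify
     SG_UNREF(out);
     SG_UNREF(svm);

   Note the sign flip set_bias(-model->b) above: SVMLight's decision rule is
   f(x) = <w,x> - b, while Shogun uses f(x) = <w,x> + b. */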
286 
287 int32_t CSVMLight::get_runtime()
288 {
289  clock_t start;
290  start = clock();
291  return((int32_t)((float64_t)start*100.0/(float64_t)CLOCKS_PER_SEC));
292 }
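/* get_runtime() returns consumed CPU time in hundredths of a second
   (clock() scaled by 100/CLOCKS_PER_SEC); it only feeds the verbosity>=2
   timing profile. */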
293 
294 
295 void CSVMLight::svm_learn()
296 {
297  int32_t *inconsistent, i;
298  int32_t misclassified,upsupvecnum;
299  float64_t maxdiff, *lin, *c, *a;
300  int32_t iterations;
301  int32_t trainpos=0, trainneg=0 ;
302  ASSERT(m_labels)
303  SGVector<int32_t> lab=((CBinaryLabels*) m_labels)->get_int_labels();
304  int32_t totdoc=lab.vlen;
305  ASSERT(lab.vector && lab.vlen)
306  int32_t* label=SGVector<int32_t>::clone_vector(lab.vector, lab.vlen);
307 
308  int32_t* docs=SG_MALLOC(int32_t, totdoc);
309  SG_FREE(W);
310  W=NULL;
311  count = 0 ;
312 
313  if (callback)
314  {
315  W = SG_MALLOC(float64_t, totdoc*kernel->get_num_subkernels());
316  for (i=0; i<totdoc*kernel->get_num_subkernels(); i++)
317  W[i]=0;
318  }
319 
320  for (i=0; i<totdoc; i++)
321  docs[i]=i;
322 
323  float64_t *xi_fullset; /* buffer for storing xi on full sample in loo */
324  float64_t *a_fullset; /* buffer for storing alpha on full sample in loo */
325  TIMING timing_profile;
326  SHRINK_STATE shrink_state;
327 
328  timing_profile.time_kernel=0;
329  timing_profile.time_opti=0;
330  timing_profile.time_shrink=0;
331  timing_profile.time_update=0;
332  timing_profile.time_model=0;
333  timing_profile.time_check=0;
334  timing_profile.time_select=0;
335 
336  /* make sure -n value is reasonable */
337  if((learn_parm->svm_newvarsinqp < 2)
338  || (learn_parm->svm_newvarsinqp > learn_parm->svm_maxqpsize)) {
339  learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize;
340  }
341 
342  init_shrink_state(&shrink_state,totdoc,(int32_t)MAXSHRINK);
343 
344  inconsistent = SG_MALLOC(int32_t, totdoc);
345  c = SG_MALLOC(float64_t, totdoc);
346  a = SG_MALLOC(float64_t, totdoc);
347  a_fullset = SG_MALLOC(float64_t, totdoc);
348  xi_fullset = SG_MALLOC(float64_t, totdoc);
349  lin = SG_MALLOC(float64_t, totdoc);
350  if (m_linear_term.vlen>0)
351  learn_parm->eps=get_linear_term_array();
352  else
353  {
354  learn_parm->eps=SG_MALLOC(float64_t, totdoc); /* equivalent regression epsilon for classification */
355  SGVector<float64_t>::fill_vector(learn_parm->eps, totdoc, -1.0);
356  }
357 
358  learn_parm->svm_cost = SG_MALLOC(float64_t, totdoc);
359 
360  SG_FREE(model->supvec);
361  SG_FREE(model->alpha);
362  SG_FREE(model->index);
363  model->supvec = SG_MALLOC(int32_t, totdoc+2);
364  model->alpha = SG_MALLOC(float64_t, totdoc+2);
365  model->index = SG_MALLOC(int32_t, totdoc+2);
366 
367  model->at_upper_bound=0;
368  model->b=0;
369  model->supvec[0]=0; /* element 0 reserved and empty for now */
370  model->alpha[0]=0;
371  model->totdoc=totdoc;
372 
373  model->kernel=kernel;
374 
375  model->sv_num=1;
376  model->loo_error=-1;
377  model->loo_recall=-1;
378  model->loo_precision=-1;
379  model->xa_error=-1;
380  model->xa_recall=-1;
381  model->xa_precision=-1;
382 
383  for (i=0;i<totdoc;i++) { /* various inits */
384  inconsistent[i]=0;
385  c[i]=0;
386  a[i]=0;
387  lin[i]=0;
388 
389  if(label[i] > 0) {
390  learn_parm->svm_cost[i]=learn_parm->svm_c*learn_parm->svm_costratio*
391  fabs((float64_t)label[i]);
392  label[i]=1;
393  trainpos++;
394  }
395  else if(label[i] < 0) {
396  learn_parm->svm_cost[i]=learn_parm->svm_c*fabs((float64_t)label[i]);
397  label[i]=-1;
398  trainneg++;
399  }
400  else {
401  learn_parm->svm_cost[i]=0;
402  }
403  }
404 
405  /* compute starting state for initial alpha values */
406  SG_DEBUG("alpha:%d num_sv:%d\n", m_alpha.vector, get_num_support_vectors())
407 
408  if(m_alpha.vector && get_num_support_vectors()) {
407 
409  if(verbosity>=1) {
410  SG_INFO("Computing starting state...")
411  }
412 
413  float64_t* alpha = SG_MALLOC(float64_t, totdoc);
414 
415  for (i=0; i<totdoc; i++)
416  alpha[i]=0;
417 
418  for (i=0; i<get_num_support_vectors(); i++)
419  alpha[get_support_vector(i)]=get_alpha(i);
420 
421  int32_t* index = SG_MALLOC(int32_t, totdoc);
422  int32_t* index2dnum = SG_MALLOC(int32_t, totdoc+11);
423  float64_t* aicache = SG_MALLOC(float64_t, totdoc);
424  for (i=0;i<totdoc;i++) { /* create full index and clip alphas */
425  index[i]=1;
426  alpha[i]=fabs(alpha[i]);
427  if(alpha[i]<0) alpha[i]=0;
428  if(alpha[i]>learn_parm->svm_cost[i]) alpha[i]=learn_parm->svm_cost[i];
429  }
430 
431  if (use_kernel_cache)
432  {
433  if (callback &&
434  (!((CCombinedKernel*) kernel)->get_append_subkernel_weights())
435  )
436  {
437  CCombinedKernel* k = (CCombinedKernel*) kernel;
438  for (index_t k_idx=0; k_idx<k->get_num_kernels(); k_idx++)
439  {
440  CKernel* kn = k->get_kernel(k_idx);
441  for (i=0;i<totdoc;i++) // fill kernel cache with unbounded SV
442  if((alpha[i]>0) && (alpha[i]<learn_parm->svm_cost[i])
443  && (kn->kernel_cache_space_available()))
444  kn->cache_kernel_row(i);
445 
446  for (i=0;i<totdoc;i++) // fill rest of kernel cache with bounded SV
447  if((alpha[i]==learn_parm->svm_cost[i])
448  && (kn->kernel_cache_space_available()))
449  kn->cache_kernel_row(i);
450 
451  SG_UNREF(kn);
452  }
453  }
454  else
455  {
456  for (i=0;i<totdoc;i++) /* fill kernel cache with unbounded SV */
457  if((alpha[i]>0) && (alpha[i]<learn_parm->svm_cost[i])
458  && (kernel->kernel_cache_space_available()))
459  kernel->cache_kernel_row(i);
460 
461  for (i=0;i<totdoc;i++) /* fill rest of kernel cache with bounded SV */
462  if((alpha[i]==learn_parm->svm_cost[i])
463  && (kernel->kernel_cache_space_available()))
464  kernel->cache_kernel_row(i);
465  }
466  }
467  compute_index(index,totdoc,index2dnum);
468  update_linear_component(docs,label,index2dnum,alpha,a,index2dnum,totdoc,
469  lin,aicache,NULL);
470  calculate_svm_model(docs,label,lin,alpha,a,c,
471  index2dnum,index2dnum);
472  for (i=0;i<totdoc;i++) { /* copy initial alphas */
473  a[i]=alpha[i];
474  }
475 
476  SG_FREE(index);
477  SG_FREE(index2dnum);
478  SG_FREE(aicache);
479  SG_FREE(alpha);
480 
481  if(verbosity>=1)
482  SG_DONE()
483  }
484  SG_DEBUG("%d totdoc %d pos %d neg\n", totdoc, trainpos, trainneg)
485  SG_DEBUG("Optimizing...\n")
486 
487  /* train the svm */
488  iterations=optimize_to_convergence(docs,label,totdoc,
489  &shrink_state,inconsistent,a,lin,
490  c,&timing_profile,
491  &maxdiff,(int32_t)-1,
492  (int32_t)1);
493 
494 
495  if(verbosity>=1) {
496  if(verbosity==1)
497  {
498  SG_DONE()
499  SG_DEBUG("(%ld iterations)", iterations)
500  }
501 
502  misclassified=0;
503  for (i=0;(i<totdoc);i++) { /* get final statistic */
504  if((lin[i]-model->b)*(float64_t)label[i] <= 0.0)
505  misclassified++;
506  }
507 
508  SG_INFO("Optimization finished (%ld misclassified, maxdiff=%.8f).\n",
509  misclassified,maxdiff);
510 
511  SG_INFO("obj = %.16f, rho = %.16f\n",get_objective(),model->b)
512  if (maxdiff>epsilon)
513  SG_WARNING("maximum violation (%f) exceeds svm_epsilon (%f) due to numerical difficulties\n", maxdiff, epsilon)
514 
515  upsupvecnum=0;
516  for (i=1;i<model->sv_num;i++)
517  {
518  if(fabs(model->alpha[i]) >=
519  (learn_parm->svm_cost[model->supvec[i]]-
520  learn_parm->epsilon_a))
521  upsupvecnum++;
522  }
523  SG_INFO("Number of SV: %d (including %d at upper bound)\n",
524  model->sv_num-1,upsupvecnum);
525  }
526 
527  shrink_state_cleanup(&shrink_state);
528  SG_FREE(label);
529  SG_FREE(inconsistent);
530  SG_FREE(c);
531  SG_FREE(a);
532  SG_FREE(a_fullset);
533  SG_FREE(xi_fullset);
534  SG_FREE(lin);
535  SG_FREE(learn_parm->eps);
536  SG_FREE(learn_parm->svm_cost);
537  SG_FREE(docs);
538 }
539 
540 int32_t CSVMLight::optimize_to_convergence(int32_t* docs, int32_t* label, int32_t totdoc,
541  SHRINK_STATE *shrink_state,
542  int32_t *inconsistent,
543  float64_t *a, float64_t *lin, float64_t *c,
544  TIMING *timing_profile, float64_t *maxdiff,
545  int32_t heldout, int32_t retrain)
546  /* docs: Training vectors (x-part) */
547  /* label: Training labels/value (y-part, zero if test example for
548  transduction) */
549  /* totdoc: Number of examples in docs/label */
550  /* learn_parm: Learning parameters */
551  /* kernel_parm: Kernel parameters */
552  /* kernel_cache: Initialized/partly filled Cache, if using a kernel.
553  NULL if linear. */
554  /* shrink_state: State of active variables */
555  /* inconsistent: examples thrown out as inconsistent */
556  /* a: alphas */
557  /* lin: linear component of gradient */
558  /* c: right hand side of inequalities (margin) */
559  /* maxdiff: returns maximum violation of KT-conditions */
560  /* heldout: marks held-out example for leave-one-out (or -1) */
561  /* retrain: selects training mode (1=regular / 2=holdout) */
562 {
563 
564  int32_t *chosen,*key,i,j,jj,*last_suboptimal_at,noshrink;
565  int32_t inconsistentnum,choosenum,already_chosen=0,iteration;
566  int32_t misclassified,supvecnum=0,*active2dnum,inactivenum;
567  int32_t *working2dnum,*selexam;
568  int32_t activenum;
569  float64_t criterion, eq;
570  float64_t *a_old;
571  int32_t t0=0,t1=0,t2=0,t3=0,t4=0,t5=0,t6=0; /* timing */
572  float64_t epsilon_crit_org;
573  float64_t bestmaxdiff;
574  float64_t worstmaxdiff;
575  int32_t bestmaxdiffiter,terminate;
576  bool reactivated=false;
577 
578  float64_t *selcrit; /* buffer for sorting */
579  float64_t *aicache; /* buffer to keep one row of hessian */
580  QP qp; /* buffer for one quadratic program */
581 
582  epsilon_crit_org=learn_parm->epsilon_crit; /* save org */
583  if(kernel->has_property(KP_LINADD) && get_linadd_enabled()) {
584  learn_parm->epsilon_crit=2.0;
585  /* caching makes no sense for linear kernel */
586  }
587  learn_parm->epsilon_shrink=2;
588  (*maxdiff)=1;
589 
590  SG_DEBUG("totdoc:%d\n",totdoc)
591  chosen = SG_MALLOC(int32_t, totdoc);
592  last_suboptimal_at =SG_MALLOC(int32_t, totdoc);
593  key =SG_MALLOC(int32_t, totdoc+11);
594  selcrit =SG_MALLOC(float64_t, totdoc);
595  selexam =SG_MALLOC(int32_t, totdoc);
596  a_old =SG_MALLOC(float64_t, totdoc);
597  aicache =SG_MALLOC(float64_t, totdoc);
598  working2dnum =SG_MALLOC(int32_t, totdoc+11);
599  active2dnum =SG_MALLOC(int32_t, totdoc+11);
600  qp.opt_ce =SG_MALLOC(float64_t, learn_parm->svm_maxqpsize);
601  qp.opt_ce0 =SG_MALLOC(float64_t, 1);
602  qp.opt_g =SG_MALLOC(float64_t, learn_parm->svm_maxqpsize*learn_parm->svm_maxqpsize);
603  qp.opt_g0 =SG_MALLOC(float64_t, learn_parm->svm_maxqpsize);
604  qp.opt_xinit =SG_MALLOC(float64_t, learn_parm->svm_maxqpsize);
605  qp.opt_low=SG_MALLOC(float64_t, learn_parm->svm_maxqpsize);
606  qp.opt_up=SG_MALLOC(float64_t, learn_parm->svm_maxqpsize);
607 
608  choosenum=0;
609  inconsistentnum=0;
610  if(!retrain) retrain=1;
611  iteration=1;
612  bestmaxdiffiter=1;
613  bestmaxdiff=999999999;
614  worstmaxdiff=1e-10;
615  terminate=0;
616 
617 
618  kernel->set_time(iteration); /* for lru cache */
619 
620  for (i=0;i<totdoc;i++) { /* various inits */
621  chosen[i]=0;
622  a_old[i]=a[i];
623  last_suboptimal_at[i]=1;
624  if(inconsistent[i])
625  inconsistentnum++;
626  }
627  activenum=compute_index(shrink_state->active,totdoc,active2dnum);
628  inactivenum=totdoc-activenum;
629  clear_index(working2dnum);
630 
631  /* repeat this loop until we have convergence */
632  CTime start_time;
633  mkl_converged=false;
634 
635 
636 #ifdef CYGWIN
637  for (;((iteration<100 || (!mkl_converged && callback) ) || (retrain && (!terminate))); iteration++){
638 #else
639  CSignal::clear_cancel();
640  for (;((!CSignal::cancel_computations()) && ((iteration<3 || (!mkl_converged && callback) ) || (retrain && (!terminate)))); iteration++){
641 #endif
642 
643  if(use_kernel_cache)
644  kernel->set_time(iteration); /* for lru cache */
645 
646  if(verbosity>=2) t0=get_runtime();
647  if(verbosity>=3) {
648  SG_DEBUG("\nSelecting working set... ")
649  }
650 
651  if(learn_parm->svm_newvarsinqp>learn_parm->svm_maxqpsize)
652  learn_parm->svm_newvarsinqp=learn_parm->svm_maxqpsize;
653 
654  i=0;
655  for (jj=0;(j=working2dnum[jj])>=0;jj++) { /* clear working set */
656  if((chosen[j]>=(learn_parm->svm_maxqpsize/
657  CMath::min(learn_parm->svm_maxqpsize,
658  learn_parm->svm_newvarsinqp)))
659  || (inconsistent[j])
660  || (j == heldout)) {
661  chosen[j]=0;
662  choosenum--;
663  }
664  else {
665  chosen[j]++;
666  working2dnum[i++]=j;
667  }
668  }
669  working2dnum[i]=-1;
670 
671  if(retrain == 2) {
672  choosenum=0;
673  for (jj=0;(j=working2dnum[jj])>=0;jj++) { /* fully clear working set */
674  chosen[j]=0;
675  }
676  clear_index(working2dnum);
677  for (i=0;i<totdoc;i++) { /* set inconsistent examples to zero (-i 1) */
678  if((inconsistent[i] || (heldout==i)) && (a[i] != 0.0)) {
679  chosen[i]=99999;
680  choosenum++;
681  a[i]=0;
682  }
683  }
684  if(learn_parm->biased_hyperplane) {
685  eq=0;
686  for (i=0;i<totdoc;i++) { /* make sure we fulfill equality constraint */
687  eq+=a[i]*label[i];
688  }
689  for (i=0;(i<totdoc) && (fabs(eq) > learn_parm->epsilon_a);i++) {
690  if((eq*label[i] > 0) && (a[i] > 0)) {
691  chosen[i]=88888;
692  choosenum++;
693  if((eq*label[i]) > a[i]) {
694  eq-=(a[i]*label[i]);
695  a[i]=0;
696  }
697  else {
698  a[i]-=(eq*label[i]);
699  eq=0;
700  }
701  }
702  }
703  }
704  compute_index(chosen,totdoc,working2dnum);
705  }
706  else
707  { /* select working set according to steepest gradient */
708  if(iteration % 101)
709  {
710  already_chosen=0;
711  if(CMath::min(learn_parm->svm_newvarsinqp, learn_parm->svm_maxqpsize-choosenum)>=4 &&
712  (!(kernel->has_property(KP_LINADD) && get_linadd_enabled())))
713  {
714  /* select part of the working set from cache */
715  already_chosen=select_next_qp_subproblem_grad(
716  label,a,lin,c,totdoc,
717  (int32_t)(CMath::min(learn_parm->svm_maxqpsize-choosenum,
718  learn_parm->svm_newvarsinqp)/2),
719  inconsistent,active2dnum,
720  working2dnum,selcrit,selexam,1,
721  key,chosen);
722  choosenum+=already_chosen;
723  }
724  choosenum+=select_next_qp_subproblem_grad(
725  label,a,lin,c,totdoc,
726  CMath::min(learn_parm->svm_maxqpsize-choosenum,
727  learn_parm->svm_newvarsinqp-already_chosen),
728  inconsistent,active2dnum,
729  working2dnum,selcrit,selexam,0,key,
730  chosen);
731  }
732  else { /* once in a while, select a somewhat random working set
733  to get unlocked of infinite loops due to numerical
734  inaccuracies in the core qp-solver */
735  choosenum+=select_next_qp_subproblem_rand(
736  label,a,lin,c,totdoc,
737  CMath::min(learn_parm->svm_maxqpsize-choosenum,
738  learn_parm->svm_newvarsinqp),
739  inconsistent,active2dnum,
740  working2dnum,selcrit,selexam,key,
741  chosen,iteration);
742  }
743  }
744 
745  if(verbosity>=2) {
746  SG_INFO(" %ld vectors chosen\n",choosenum)
747  }
748 
749  if(verbosity>=2) t1=get_runtime();
750 
751  if (use_kernel_cache)
752  {
753  // in case of MKL w/o linadd cache each kernel independently
754  // else if linadd is disabled cache single kernel
755  if ( callback &&
756  (!((CCombinedKernel*) kernel)->get_append_subkernel_weights())
757  )
758  {
759  CCombinedKernel* k = (CCombinedKernel*) kernel;
760  for (index_t k_idx=0; k_idx<k->get_num_kernels(); k_idx++)
761  {
762  CKernel* kn = k->get_kernel(k_idx);
763  kn->cache_multiple_kernel_rows(working2dnum, choosenum);
764  SG_UNREF(kn);
765  }
766  }
767  else
768  kernel->cache_multiple_kernel_rows(working2dnum, choosenum);
769  }
770 
771  if(verbosity>=2) t2=get_runtime();
772 
773  if(retrain != 2) {
774  optimize_svm(docs,label,inconsistent,0.0,chosen,active2dnum,
775  totdoc,working2dnum,choosenum,a,lin,c,
776  aicache,&qp,&epsilon_crit_org);
777  }
778 
779  if(verbosity>=2) t3=get_runtime();
780  update_linear_component(docs,label,active2dnum,a,a_old,working2dnum,totdoc,
781  lin,aicache,c);
782 
783  if(verbosity>=2) t4=get_runtime();
784  supvecnum=calculate_svm_model(docs,label,lin,a,a_old,c,working2dnum,active2dnum);
785 
786  if(verbosity>=2) t5=get_runtime();
787 
788  for (jj=0;(i=working2dnum[jj])>=0;jj++) {
789  a_old[i]=a[i];
790  }
791 
792  retrain=check_optimality(label,a,lin,c,totdoc,
793  maxdiff,epsilon_crit_org,&misclassified,
794  inconsistent,active2dnum,last_suboptimal_at,
795  iteration);
796 
797  if(verbosity>=2) {
798  t6=get_runtime();
799  timing_profile->time_select+=t1-t0;
800  timing_profile->time_kernel+=t2-t1;
801  timing_profile->time_opti+=t3-t2;
802  timing_profile->time_update+=t4-t3;
803  timing_profile->time_model+=t5-t4;
804  timing_profile->time_check+=t6-t5;
805  }
806 
807  /* checking whether optimizer got stuck */
808  if((*maxdiff) < bestmaxdiff) {
809  bestmaxdiff=(*maxdiff);
810  bestmaxdiffiter=iteration;
811  }
812  if(iteration > (bestmaxdiffiter+learn_parm->maxiter)) {
813  /* long time no progress? */
814  terminate=1;
815  retrain=0;
816  SG_WARNING("Relaxing KT-Conditions due to slow progress! Terminating!\n")
817  SG_DEBUG("(iteration :%d, bestmaxdiffiter: %d, learn_parm->maxiter: %d)\n", iteration, bestmaxdiffiter, learn_parm->maxiter )
818  }
819 
820  noshrink= (get_shrinking_enabled()) ? 0 : 1;
821 
822  if ((!callback) && (!retrain) && (inactivenum>0) &&
823  ((!learn_parm->skip_final_opt_check) || (kernel->has_property(KP_LINADD) && get_linadd_enabled())))
824  {
825  t1=get_runtime();
826  SG_DEBUG("reactivating inactive examples\n")
827 
828  reactivate_inactive_examples(label,a,shrink_state,lin,c,totdoc,
829  iteration,inconsistent,
830  docs,aicache,
831  maxdiff);
832  reactivated=true;
833  SG_DEBUG("done reactivating inactive examples (maxdiff:%8f eps_crit:%8f orig_eps:%8f)\n", *maxdiff, learn_parm->epsilon_crit, epsilon_crit_org)
834  /* Update to new active variables. */
835  activenum=compute_index(shrink_state->active,totdoc,active2dnum);
836  inactivenum=totdoc-activenum;
837  /* reset watchdog */
838  bestmaxdiff=(*maxdiff);
839  bestmaxdiffiter=iteration;
840 
841  /* termination criterion */
842  noshrink=1;
843  retrain=0;
844  if((*maxdiff) > learn_parm->epsilon_crit)
845  {
846  SG_INFO("restarting optimization as we are - due to shrinking - deviating too much (maxdiff(%f) > eps(%f))\n", *maxdiff, learn_parm->epsilon_crit)
847  retrain=1;
848  }
849  timing_profile->time_shrink+=get_runtime()-t1;
850  if (((verbosity>=1) && (!(kernel->has_property(KP_LINADD) && get_linadd_enabled())))
851  || (verbosity>=2)) {
852  SG_DONE()
853  SG_DEBUG("Number of inactive variables = %ld\n", inactivenum)
854  }
855  }
856 
857  if((!retrain) && (learn_parm->epsilon_crit>(*maxdiff)))
858  learn_parm->epsilon_crit=(*maxdiff);
859  if((!retrain) && (learn_parm->epsilon_crit>epsilon_crit_org)) {
860  learn_parm->epsilon_crit/=4.0;
861  retrain=1;
862  noshrink=1;
863  }
864  if(learn_parm->epsilon_crit<epsilon_crit_org)
865  learn_parm->epsilon_crit=epsilon_crit_org;
866 
867  if(verbosity>=2) {
868  SG_INFO(" => (%ld SV (incl. %ld SV at u-bound), max violation=%.5f)\n",
869  supvecnum,model->at_upper_bound,(*maxdiff));
870 
871  }
872  mymaxdiff=*maxdiff ;
873 
874  //don't shrink w/ mkl
875  if (((iteration % 10) == 0) && (!noshrink) && !callback)
876  {
877  activenum=shrink_problem(shrink_state,active2dnum,last_suboptimal_at,iteration,totdoc,
878  CMath::max((int32_t)(activenum/10),
879  CMath::max((int32_t)(totdoc/500),(int32_t) 100)),
880  a,inconsistent, c, lin, label);
881 
882  inactivenum=totdoc-activenum;
883 
884  if (use_kernel_cache && (supvecnum>kernel->get_max_elems_cache())
885  && ((kernel->get_activenum_cache()-activenum)>CMath::max((int32_t)(activenum/10),(int32_t) 500))) {
886 
887  kernel->kernel_cache_shrink(totdoc, CMath::min((int32_t) (kernel->get_activenum_cache()-activenum),
888  (int32_t) (kernel->get_activenum_cache()-supvecnum)),
889  shrink_state->active);
890  }
891  }
892 
893  if (bestmaxdiff>worstmaxdiff)
894  worstmaxdiff=bestmaxdiff;
895 
896  SG_ABS_PROGRESS(bestmaxdiff, -CMath::log10(bestmaxdiff), -CMath::log10(worstmaxdiff), -CMath::log10(epsilon), 6)
897 
898  /* Terminate loop */
899  if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time) {
900  terminate = 1;
901  retrain = 0;
902  }
903 
904  } /* end of loop */
905 
906  SG_DEBUG("inactive:%d\n", inactivenum)
907 
908  if (inactivenum && !reactivated && !callback)
909  {
910  SG_DEBUG("reactivating inactive examples\n")
911  reactivate_inactive_examples(label,a,shrink_state,lin,c,totdoc,
912  iteration,inconsistent,
913  docs,aicache,
914  maxdiff);
915  SG_DEBUG("done reactivating inactive examples\n")
916  /* Update to new active variables. */
917  activenum=compute_index(shrink_state->active,totdoc,active2dnum);
918  inactivenum=totdoc-activenum;
919  /* reset watchdog */
920  bestmaxdiff=(*maxdiff);
921  bestmaxdiffiter=iteration;
922  }
923 
924  //use this for our purposes!
925  criterion=compute_objective_function(a,lin,c,learn_parm->eps,label,totdoc);
926  CSVM::set_objective(criterion);
927 
928  SG_FREE(chosen);
929  SG_FREE(last_suboptimal_at);
930  SG_FREE(key);
931  SG_FREE(selcrit);
932  SG_FREE(selexam);
933  SG_FREE(a_old);
934  SG_FREE(aicache);
935  SG_FREE(working2dnum);
936  SG_FREE(active2dnum);
937  SG_FREE(qp.opt_ce);
938  SG_FREE(qp.opt_ce0);
939  SG_FREE(qp.opt_g);
940  SG_FREE(qp.opt_g0);
941  SG_FREE(qp.opt_xinit);
942  SG_FREE(qp.opt_low);
943  SG_FREE(qp.opt_up);
944 
945  learn_parm->epsilon_crit=epsilon_crit_org; /* restore org */
946 
947  return(iteration);
948 }
949 
950 float64_t CSVMLight::compute_objective_function(
951  float64_t *a, float64_t *lin, float64_t *c, float64_t* eps, int32_t *label,
952  int32_t totdoc)
953  /* Return value of objective function. */
954  /* Works only relative to the active variables! */
955 {
956  /* calculate value of objective function */
957  float64_t criterion=0;
958 
959  for (int32_t i=0;i<totdoc;i++)
960  criterion=criterion+(eps[i]-(float64_t)label[i]*c[i])*a[i]+0.5*a[i]*label[i]*lin[i];
961 
962  return(criterion);
963 }
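/* In the notation above, the quantity being minimized is

     W(a) = sum_i (eps[i] - y_i*c[i])*a_i + 1/2 * sum_i y_i*a_i*lin[i],

   with lin[i] = sum_j y_j*a_j*K(i,j). For plain classification
   (eps[i] = -1, c[i] = 0) this is -sum_i a_i + 1/2 sum_{ij} a_i a_j y_i y_j
   K(i,j), i.e. the negated standard SVM dual. */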
964 
965 
966 void CSVMLight::clear_index(int32_t *index)
967  /* initializes and empties index */
968 {
969  index[0]=-1;
970 }
971 
972 void CSVMLight::add_to_index(int32_t *index, int32_t elem)
973  /* appends elem to the -1 terminated index */
974 {
975  register int32_t i;
976  for (i=0;index[i] != -1;i++);
977  index[i]=elem;
978  index[i+1]=-1;
979 }
980 
981 int32_t CSVMLight::compute_index(
982  int32_t *binfeature, int32_t range, int32_t *index)
983  /* create an inverted index of binfeature */
984 {
985  register int32_t i,ii;
986 
987  ii=0;
988  for (i=0;i<range;i++) {
989  if(binfeature[i]) {
990  index[ii]=i;
991  ii++;
992  }
993  }
994  for (i=0;i<4;i++) {
995  index[ii+i]=-1;
996  }
997  return(ii);
998 }
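/* The *2dnum arrays used throughout are -1 terminated lists of active
   document numbers; compute_index() inverts a 0/1 membership vector into
   that form. The four trailing -1 entries give callers a safe lookahead
   margin. */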
999 
1000 
1001 void CSVMLight::optimize_svm(
1002  int32_t* docs, int32_t* label, int32_t *exclude_from_eq_const,
1003  float64_t eq_target, int32_t *chosen, int32_t *active2dnum, int32_t totdoc,
1004  int32_t *working2dnum, int32_t varnum, float64_t *a, float64_t *lin,
1005  float64_t *c, float64_t *aicache, QP *qp, float64_t *epsilon_crit_target)
1006  /* Do optimization on the working set. */
1007 {
1008  int32_t i;
1009  float64_t *a_v;
1010 
1011  //compute_matrices_for_optimization_parallel(docs,label,
1012  // exclude_from_eq_const,eq_target,chosen,
1013  // active2dnum,working2dnum,a,lin,c,
1014  // varnum,totdoc,aicache,qp);
1015 
1016  compute_matrices_for_optimization(docs,label,
1017  exclude_from_eq_const,eq_target,chosen,
1018  active2dnum,working2dnum,a,lin,c,
1019  varnum,totdoc,aicache,qp);
1020 
1021  if(verbosity>=3) {
1022  SG_DEBUG("Running optimizer...")
1023  }
1024  /* call the qp-subsolver */
1025  a_v=optimize_qp(qp,epsilon_crit_target,
1026  learn_parm->svm_maxqpsize,
1027  &(model->b), /* in case the optimizer gives us */
1028  learn_parm->svm_maxqpsize); /* the threshold for free. otherwise */
1029  /* b is calculated in calculate_model. */
1030  if(verbosity>=3) {
1031  SG_DONE()
1032  }
1033 
1034  for (i=0;i<varnum;i++)
1035  a[working2dnum[i]]=a_v[i];
1036 }
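/* The extracted subproblem has the classic SVM shape over the working set:
   minimize 1/2*x'*G*x + g0'*x subject to ce'*x = ce0 and 0 <= x <= svm_cost,
   with G[i][j] = y_i*y_j*K(x_i,x_j). optimize_qp() hands it to the pr_loqo
   interior-point code included above. */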
1037 
1038 void CSVMLight::compute_matrices_for_optimization_parallel(
1039  int32_t* docs, int32_t* label, int32_t *exclude_from_eq_const,
1040  float64_t eq_target, int32_t *chosen, int32_t *active2dnum, int32_t *key,
1041  float64_t *a, float64_t *lin, float64_t *c, int32_t varnum, int32_t totdoc,
1042  float64_t *aicache, QP *qp)
1043 {
1044  if (parallel->get_num_threads()<=1)
1045  {
1046  compute_matrices_for_optimization(docs, label, exclude_from_eq_const, eq_target,
1047  chosen, active2dnum, key, a, lin, c,
1048  varnum, totdoc, aicache, qp) ;
1049  }
1050 #ifdef HAVE_PTHREAD
1051  else
1052  {
1053  register int32_t ki,kj,i,j;
1054  register float64_t kernel_temp;
1055 
1056  qp->opt_n=varnum;
1057  qp->opt_ce0[0]=-eq_target; /* compute the constant for equality constraint */
1058  for (j=1;j<model->sv_num;j++) { /* start at 1 */
1059  if((!chosen[model->supvec[j]])
1060  && (!exclude_from_eq_const[(model->supvec[j])])) {
1061  qp->opt_ce0[0]+=model->alpha[j];
1062  }
1063  }
1064  if(learn_parm->biased_hyperplane)
1065  qp->opt_m=1;
1066  else
1067  qp->opt_m=0; /* eq-constraint will be ignored */
1068 
1069  /* init linear part of objective function */
1070  for (i=0;i<varnum;i++) {
1071  qp->opt_g0[i]=lin[key[i]];
1072  }
1073 
1075  int32_t *KI=SG_MALLOC(int32_t, varnum*varnum);
1076  int32_t *KJ=SG_MALLOC(int32_t, varnum*varnum);
1077  int32_t Knum=0 ;
1078  float64_t *Kval = SG_MALLOC(float64_t, varnum*(varnum+1)/2);
1079  for (i=0;i<varnum;i++) {
1080  ki=key[i];
1081  KI[Knum]=docs[ki] ;
1082  KJ[Knum]=docs[ki] ;
1083  Knum++ ;
1084  for (j=i+1;j<varnum;j++)
1085  {
1086  kj=key[j];
1087  KI[Knum]=docs[ki] ;
1088  KJ[Knum]=docs[kj] ;
1089  Knum++ ;
1090  }
1091  }
1092  ASSERT(Knum<=varnum*(varnum+1)/2)
1093 
1094  pthread_t* threads = SG_MALLOC(pthread_t, parallel->get_num_threads()-1);
1095  S_THREAD_PARAM_KERNEL* params = SG_MALLOC(S_THREAD_PARAM_KERNEL, parallel->get_num_threads()-1);
1096  int32_t step= Knum/parallel->get_num_threads();
1097  //SG_DEBUG("\nkernel-step size: %i\n", step)
1098  for (int32_t t=0; t<parallel->get_num_threads()-1; t++)
1099  {
1100  params[t].svmlight = this;
1101  params[t].start = t*step;
1102  params[t].end = (t+1)*step;
1103  params[t].KI=KI ;
1104  params[t].KJ=KJ ;
1105  params[t].Kval=Kval ;
1106  pthread_create(&threads[t], NULL, CSVMLight::compute_kernel_helper, (void*)&params[t]);
1107  }
1108  for (i=params[parallel->get_num_threads()-2].end; i<Knum; i++)
1109  Kval[i]=compute_kernel(KI[i],KJ[i]) ;
1110 
1111  for (int32_t t=0; t<parallel->get_num_threads()-1; t++)
1112  pthread_join(threads[t], NULL);
1113 
1114  SG_FREE(params);
1115  SG_FREE(threads);
1116 
1117  Knum=0 ;
1118  for (i=0;i<varnum;i++) {
1119  ki=key[i];
1120 
1121  /* Compute the matrix for equality constraints */
1122  qp->opt_ce[i]=label[ki];
1123  qp->opt_low[i]=0;
1124  qp->opt_up[i]=learn_parm->svm_cost[ki];
1125 
1126  kernel_temp=Kval[Knum] ;
1127  Knum++ ;
1128  /* compute linear part of objective function */
1129  qp->opt_g0[i]-=(kernel_temp*a[ki]*(float64_t)label[ki]);
1130  /* compute quadratic part of objective function */
1131  qp->opt_g[varnum*i+i]=kernel_temp;
1132 
1133  for (j=i+1;j<varnum;j++) {
1134  kj=key[j];
1135  kernel_temp=Kval[Knum] ;
1136  Knum++ ;
1137  /* compute linear part of objective function */
1138  qp->opt_g0[i]-=(kernel_temp*a[kj]*(float64_t)label[kj]);
1139  qp->opt_g0[j]-=(kernel_temp*a[ki]*(float64_t)label[ki]);
1140  /* compute quadratic part of objective function */
1141  qp->opt_g[varnum*i+j]=(float64_t)label[ki]*(float64_t)label[kj]*kernel_temp;
1142  qp->opt_g[varnum*j+i]=qp->opt_g[varnum*i+j];//(float64_t)label[ki]*(float64_t)label[kj]*kernel_temp;
1143  }
1144 
1145  if(verbosity>=3) {
1146  if(i % 20 == 0) {
1147  SG_DEBUG("%ld..",i)
1148  }
1149  }
1150  }
1151 
1152  SG_FREE(KI);
1153  SG_FREE(KJ);
1154  SG_FREE(Kval);
1155 
1156  for (i=0;i<varnum;i++) {
1157  /* assure starting at feasible point */
1158  qp->opt_xinit[i]=a[key[i]];
1159  /* set linear part of objective function */
1160  qp->opt_g0[i]=(learn_parm->eps[key[i]]-(float64_t)label[key[i]]*c[key[i]])+qp->opt_g0[i]*(float64_t)label[key[i]];
1161  }
1162 
1163  if(verbosity>=3) {
1164  SG_DONE()
1165  }
1166  }
1167 #endif
1168 }
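/* The parallel variant enumerates the (i,i) and (i,j>i) kernel pairs of the
   working set into flat KI/KJ arrays -- varnum*(varnum+1)/2 evaluations in
   total -- and splits them evenly across threads before assembling the same
   qp matrices as the sequential version below. */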
1169 
1170 void CSVMLight::compute_matrices_for_optimization(
1171  int32_t* docs, int32_t* label, int32_t *exclude_from_eq_const,
1172  float64_t eq_target, int32_t *chosen, int32_t *active2dnum, int32_t *key,
1173  float64_t *a, float64_t *lin, float64_t *c, int32_t varnum, int32_t totdoc,
1174  float64_t *aicache, QP *qp)
1175 {
1176  register int32_t ki,kj,i,j;
1177  register float64_t kernel_temp;
1178 
1179  qp->opt_n=varnum;
1180  qp->opt_ce0[0]=-eq_target; /* compute the constant for equality constraint */
1181  for (j=1;j<model->sv_num;j++) { /* start at 1 */
1182  if((!chosen[model->supvec[j]])
1183  && (!exclude_from_eq_const[(model->supvec[j])])) {
1184  qp->opt_ce0[0]+=model->alpha[j];
1185  }
1186  }
1187  if(learn_parm->biased_hyperplane)
1188  qp->opt_m=1;
1189  else
1190  qp->opt_m=0; /* eq-constraint will be ignored */
1191 
1192  /* init linear part of objective function */
1193  for (i=0;i<varnum;i++) {
1194  qp->opt_g0[i]=lin[key[i]];
1195  }
1196 
1197  for (i=0;i<varnum;i++) {
1198  ki=key[i];
1199 
1200  /* Compute the matrix for equality constraints */
1201  qp->opt_ce[i]=label[ki];
1202  qp->opt_low[i]=0;
1203  qp->opt_up[i]=learn_parm->svm_cost[ki];
1204 
1205  kernel_temp=compute_kernel(docs[ki], docs[ki]);
1206  /* compute linear part of objective function */
1207  qp->opt_g0[i]-=(kernel_temp*a[ki]*(float64_t)label[ki]);
1208  /* compute quadratic part of objective function */
1209  qp->opt_g[varnum*i+i]=kernel_temp;
1210 
1211  for (j=i+1;j<varnum;j++) {
1212  kj=key[j];
1213  kernel_temp=compute_kernel(docs[ki], docs[kj]);
1214 
1215  /* compute linear part of objective function */
1216  qp->opt_g0[i]-=(kernel_temp*a[kj]*(float64_t)label[kj]);
1217  qp->opt_g0[j]-=(kernel_temp*a[ki]*(float64_t)label[ki]);
1218  /* compute quadratic part of objective function */
1219  qp->opt_g[varnum*i+j]=(float64_t)label[ki]*(float64_t)label[kj]*kernel_temp;
1220  qp->opt_g[varnum*j+i]=qp->opt_g[varnum*i+j];//(float64_t)label[ki]*(float64_t)label[kj]*kernel_temp;
1221  }
1222 
1223  if(verbosity>=3) {
1224  if(i % 20 == 0) {
1225  SG_DEBUG("%ld..",i)
1226  }
1227  }
1228  }
1229 
1230  for (i=0;i<varnum;i++) {
1231  /* assure starting at feasible point */
1232  qp->opt_xinit[i]=a[key[i]];
1233  /* set linear part of objective function */
1234  qp->opt_g0[i]=(learn_parm->eps[key[i]]-(float64_t)label[key[i]]*c[key[i]]) + qp->opt_g0[i]*(float64_t)label[key[i]];
1235  }
1236 
1237  if(verbosity>=3) {
1238  SG_DONE()
1239  }
1240 }
1241 
1242 
1243 int32_t CSVMLight::calculate_svm_model(
1244  int32_t* docs, int32_t *label, float64_t *lin, float64_t *a,
1245  float64_t *a_old, float64_t *c, int32_t *working2dnum, int32_t *active2dnum)
1246  /* Compute decision function based on current values */
1247  /* of alpha. */
1248 {
1249  int32_t i,ii,pos,b_calculated=0,first_low,first_high;
1250  float64_t ex_c,b_temp,b_low,b_high;
1251 
1252  if(verbosity>=3) {
1253  SG_DEBUG("Calculating model...")
1254  }
1255 
1256  if(!learn_parm->biased_hyperplane) {
1257  model->b=0;
1258  b_calculated=1;
1259  }
1260 
1261  for (ii=0;(i=working2dnum[ii])>=0;ii++) {
1262  if((a_old[i]>0) && (a[i]==0)) { /* remove from model */
1263  pos=model->index[i];
1264  model->index[i]=-1;
1265  (model->sv_num)--;
1266  model->supvec[pos]=model->supvec[model->sv_num];
1267  model->alpha[pos]=model->alpha[model->sv_num];
1268  model->index[model->supvec[pos]]=pos;
1269  }
1270  else if((a_old[i]==0) && (a[i]>0)) { /* add to model */
1271  model->supvec[model->sv_num]=docs[i];
1272  model->alpha[model->sv_num]=a[i]*(float64_t)label[i];
1273  model->index[i]=model->sv_num;
1274  (model->sv_num)++;
1275  }
1276  else if(a_old[i]==a[i]) { /* nothing to do */
1277  }
1278  else { /* just update alpha */
1279  model->alpha[model->index[i]]=a[i]*(float64_t)label[i];
1280  }
1281 
1282  ex_c=learn_parm->svm_cost[i]-learn_parm->epsilon_a;
1283  if((a_old[i]>=ex_c) && (a[i]<ex_c)) {
1284  (model->at_upper_bound)--;
1285  }
1286  else if((a_old[i]<ex_c) && (a[i]>=ex_c)) {
1287  (model->at_upper_bound)++;
1288  }
1289 
1290  if((!b_calculated)
1291  && (a[i]>learn_parm->epsilon_a) && (a[i]<ex_c)) { /* calculate b */
1292  model->b=((float64_t)label[i]*learn_parm->eps[i]-c[i]+lin[i]);
1293  b_calculated=1;
1294  }
1295  }
1296 
1297  /* No alpha in the working set not at bounds, so b was not
1298  calculated in the usual way. The following handles this special
1299  case. */
1300  if(learn_parm->biased_hyperplane
1301  && (!b_calculated)
1302  && (model->sv_num-1 == model->at_upper_bound)) {
1303  first_low=1;
1304  first_high=1;
1305  b_low=0;
1306  b_high=0;
1307  for (ii=0;(i=active2dnum[ii])>=0;ii++) {
1308  ex_c=learn_parm->svm_cost[i]-learn_parm->epsilon_a;
1309  if(a[i]<ex_c) {
1310  if(label[i]>0) {
1311  b_temp=-(learn_parm->eps[i]-c[i]+lin[i]);
1312  if((b_temp>b_low) || (first_low)) {
1313  b_low=b_temp;
1314  first_low=0;
1315  }
1316  }
1317  else {
1318  b_temp=-(-learn_parm->eps[i]-c[i]+lin[i]);
1319  if((b_temp<b_high) || (first_high)) {
1320  b_high=b_temp;
1321  first_high=0;
1322  }
1323  }
1324  }
1325  else {
1326  if(label[i]<0) {
1327  b_temp=-(-learn_parm->eps[i]-c[i]+lin[i]);
1328  if((b_temp>b_low) || (first_low)) {
1329  b_low=b_temp;
1330  first_low=0;
1331  }
1332  }
1333  else {
1334  b_temp=-(learn_parm->eps[i]-c[i]+lin[i]);
1335  if((b_temp<b_high) || (first_high)) {
1336  b_high=b_temp;
1337  first_high=0;
1338  }
1339  }
1340  }
1341  }
1342  if(first_high) {
1343  model->b=-b_low;
1344  }
1345  else if(first_low) {
1346  model->b=-b_high;
1347  }
1348  else {
1349  model->b=-(b_high+b_low)/2.0; /* select b as the middle of range */
1350  }
1351  }
1352 
1353  if(verbosity>=3) {
1354  SG_DONE()
1355  }
1356 
1357  return(model->sv_num-1); /* have to subtract one, since element 0 is empty*/
1358 }
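/* The bias is read off any free SV (epsilon_a < a_i < C_i) as
   b = y_i*eps[i] - c[i] + lin[i]. If every SV is at the upper bound, the
   KT conditions only bracket b, so the code above falls back to the
   midpoint of [b_low, b_high]. */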
1359 
1360 int32_t CSVMLight::check_optimality(
1361  int32_t* label, float64_t *a, float64_t *lin, float64_t *c, int32_t totdoc,
1362  float64_t *maxdiff, float64_t epsilon_crit_org, int32_t *misclassified,
1363  int32_t *inconsistent, int32_t *active2dnum, int32_t *last_suboptimal_at,
1364  int32_t iteration)
1365  /* Check KT-conditions */
1366 {
1367  int32_t i,ii,retrain;
1368  float64_t dist,ex_c,target;
1369 
1370  if(kernel->has_property(KP_LINADD) && get_linadd_enabled())
1371  learn_parm->epsilon_shrink=-learn_parm->epsilon_crit+epsilon_crit_org;
1372  else
1373  learn_parm->epsilon_shrink=learn_parm->epsilon_shrink*0.7+(*maxdiff)*0.3;
1374  retrain=0;
1375  (*maxdiff)=0;
1376  (*misclassified)=0;
1377  for (ii=0;(i=active2dnum[ii])>=0;ii++) {
1378  if((!inconsistent[i]) && label[i]) {
1379  dist=(lin[i]-model->b)*(float64_t)label[i];/* 'distance' from
1380  hyperplane*/
1381  target=-(learn_parm->eps[i]-(float64_t)label[i]*c[i]);
1382  ex_c=learn_parm->svm_cost[i]-learn_parm->epsilon_a;
1383  if(dist <= 0) {
1384  (*misclassified)++; /* does not work due to deactivation of var */
1385  }
1386  if((a[i]>learn_parm->epsilon_a) && (dist > target)) {
1387  if((dist-target)>(*maxdiff)) /* largest violation */
1388  (*maxdiff)=dist-target;
1389  }
1390  else if((a[i]<ex_c) && (dist < target)) {
1391  if((target-dist)>(*maxdiff)) /* largest violation */
1392  (*maxdiff)=target-dist;
1393  }
1394  /* Count how long a variable was at lower/upper bound (and optimal).*/
1395  /* Variables, which were at the bound and optimal for a long */
1396  /* time are unlikely to become support vectors. In case our */
1397  /* cache is filled up, those variables are excluded to save */
1398  /* kernel evaluations. (See chapter 'Shrinking').*/
1399  if((a[i]>(learn_parm->epsilon_a))
1400  && (a[i]<ex_c)) {
1401  last_suboptimal_at[i]=iteration; /* not at bound */
1402  }
1403  else if((a[i]<=(learn_parm->epsilon_a))
1404  && (dist < (target+learn_parm->epsilon_shrink))) {
1405  last_suboptimal_at[i]=iteration; /* not likely optimal */
1406  }
1407  else if((a[i]>=ex_c)
1408  && (dist > (target-learn_parm->epsilon_shrink))) {
1409  last_suboptimal_at[i]=iteration; /* not likely optimal */
1410  }
1411  }
1412  }
1413 
1414  /* termination criterion */
1415  if((!retrain) && ((*maxdiff) > learn_parm->epsilon_crit)) {
1416  retrain=1;
1417  }
1418  return(retrain);
1419 }
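/* KT checks above, with dist = y_i*(lin[i]-b) and
   target = -(eps[i] - y_i*c[i]):
     a_i = 0       optimal iff dist >= target
     0 < a_i < C   optimal iff dist == target (up to epsilon_crit)
     a_i = C       optimal iff dist <= target
   *maxdiff reports the largest violation; epsilon_shrink is a smoothed
   violation estimate used to judge which bounded variables look safely
   optimal for shrinking. */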
1420 
1421 void CSVMLight::update_linear_component(
1422  int32_t* docs, int32_t* label, int32_t *active2dnum, float64_t *a,
1423  float64_t *a_old, int32_t *working2dnum, int32_t totdoc, float64_t *lin,
1424  float64_t *aicache, float64_t* c)
1425  /* keep track of the linear component */
1426  /* lin of the gradient etc. by updating */
1427  /* based on the change of the variables */
1428  /* in the current working set */
1429 {
1430  register int32_t i=0,ii=0,j=0,jj=0;
1431 
1433  {
1434  if (callback)
1435  {
1436  update_linear_component_mkl_linadd(docs, label, active2dnum, a,
1437  a_old, working2dnum, totdoc, lin, aicache);
1438  }
1439  else
1440  {
1441  kernel->clear_normal();
1442 
1443  int32_t num_working=0;
1444  for (ii=0;(i=working2dnum[ii])>=0;ii++) {
1445  if(a[i] != a_old[i]) {
1446  kernel->add_to_normal(docs[i], (a[i]-a_old[i])*(float64_t)label[i]);
1447  num_working++;
1448  }
1449  }
1450 
1451  if (num_working>0)
1452  {
1453  if (parallel->get_num_threads() < 2)
1454  {
1455  for (jj=0;(j=active2dnum[jj])>=0;jj++) {
1456  lin[j]+=kernel->compute_optimized(docs[j]);
1457  }
1458  }
1459 #ifdef HAVE_PTHREAD
1460  else
1461  {
1462  int32_t num_elem = 0 ;
1463  for (jj=0;(j=active2dnum[jj])>=0;jj++) num_elem++ ;
1464 
1465  pthread_t* threads = SG_MALLOC(pthread_t, parallel->get_num_threads()-1);
1466  S_THREAD_PARAM_SVMLIGHT* params = SG_MALLOC(S_THREAD_PARAM_SVMLIGHT, parallel->get_num_threads()-1);
1467  int32_t start = 0 ;
1468  int32_t step = num_elem/parallel->get_num_threads();
1469  int32_t end = step ;
1470 
1471  for (int32_t t=0; t<parallel->get_num_threads()-1; t++)
1472  {
1473  params[t].kernel = kernel ;
1474  params[t].lin = lin ;
1475  params[t].docs = docs ;
1476  params[t].active2dnum=active2dnum ;
1477  params[t].start = start ;
1478  params[t].end = end ;
1479  start=end ;
1480  end+=step ;
1481  pthread_create(&threads[t], NULL, update_linear_component_linadd_helper, (void*)&params[t]) ;
1482  }
1483 
1484  for (jj=params[parallel->get_num_threads()-2].end;(j=active2dnum[jj])>=0;jj++) {
1485  lin[j]+=kernel->compute_optimized(docs[j]);
1486  }
1487  void* ret;
1488  for (int32_t t=0; t<parallel->get_num_threads()-1; t++)
1489  pthread_join(threads[t], &ret) ;
1490 
1491  SG_FREE(params);
1492  SG_FREE(threads);
1493  }
1494 #endif
1495  }
1496  }
1497  }
1498  else
1499  {
1500  if (callback)
1501  {
1502  update_linear_component_mkl(docs, label, active2dnum,
1503  a, a_old, working2dnum, totdoc, lin, aicache);
1504  }
1505  else {
1506  for (jj=0;(i=working2dnum[jj])>=0;jj++) {
1507  if(a[i] != a_old[i]) {
1508  kernel->get_kernel_row(i,active2dnum,aicache);
1509  for (ii=0;(j=active2dnum[ii])>=0;ii++)
1510  lin[j]+=(a[i]-a_old[i])*aicache[j]*(float64_t)label[i];
1511  }
1512  }
1513  }
1514  }
1515 }
1516 
1517 
1518 void CSVMLight::update_linear_component_mkl(
1519  int32_t* docs, int32_t* label, int32_t *active2dnum, float64_t *a,
1520  float64_t *a_old, int32_t *working2dnum, int32_t totdoc, float64_t *lin,
1521  float64_t *aicache)
1522 {
1523  //int inner_iters=0;
1524  int32_t num = kernel->get_num_vec_rhs();
1525  int32_t num_weights = -1;
1526  int32_t num_kernels = kernel->get_num_subkernels() ;
1527  const float64_t* old_beta = kernel->get_subkernel_weights(num_weights);
1528  ASSERT(num_weights==num_kernels)
1529 
1530  if ((kernel->get_kernel_type()==K_COMBINED) &&
1531  (!((CCombinedKernel*) kernel)->get_append_subkernel_weights()))// for combined kernel
1532  {
1533  CCombinedKernel* k = (CCombinedKernel*) kernel;
1534 
1535  int32_t n = 0, i, j ;
1536 
1537  for (index_t k_idx=0; k_idx<k->get_num_kernels(); k_idx++)
1538  {
1539  CKernel* kn = k->get_kernel(k_idx);
1540  for (i=0;i<num;i++)
1541  {
1542  if(a[i] != a_old[i])
1543  {
1544  kn->get_kernel_row(i,NULL,aicache, true);
1545  for (j=0;j<num;j++)
1546  W[j*num_kernels+n]+=(a[i]-a_old[i])*aicache[j]*(float64_t)label[i];
1547  }
1548  }
1549 
1550  SG_UNREF(kn);
1551  n++ ;
1552  }
1553  }
1554  else // hope the kernel is fast ...
1555  {
1556  float64_t* w_backup = SG_MALLOC(float64_t, num_kernels);
1557  float64_t* w1 = SG_MALLOC(float64_t, num_kernels);
1558 
1559  // backup and set to zero
1560  for (int32_t i=0; i<num_kernels; i++)
1561  {
1562  w_backup[i] = old_beta[i] ;
1563  w1[i]=0.0 ;
1564  }
1565  for (int32_t n=0; n<num_kernels; n++)
1566  {
1567  w1[n]=1.0 ;
1568  kernel->set_subkernel_weights(SGVector<float64_t>(w1, num_weights)) ;
1569 
1570  for (int32_t i=0;i<num;i++)
1571  {
1572  if(a[i] != a_old[i])
1573  {
1574  for (int32_t j=0;j<num;j++)
1575  W[j*num_kernels+n]+=(a[i]-a_old[i])*compute_kernel(i,j)*(float64_t)label[i];
1576  }
1577  }
1578  w1[n]=0.0 ;
1579  }
1580 
1581  // restore old weights
1582  kernel->set_subkernel_weights(SGVector<float64_t>(w_backup,num_weights));
1583 
1584  SG_FREE(w_backup);
1585  SG_FREE(w1);
1586  }
1587 
1588  call_mkl_callback(a, label, lin);
1589 }
1590 
1591 
1592 void CSVMLight::update_linear_component_mkl_linadd(
1593  int32_t* docs, int32_t* label, int32_t *active2dnum, float64_t *a,
1594  float64_t *a_old, int32_t *working2dnum, int32_t totdoc, float64_t *lin,
1595  float64_t *aicache)
1596 {
1597  //int inner_iters=0;
1598 
1599  // kernel with KP_LINADD property is assumed to have
1600  // compute_by_subkernel functions
1601  int32_t num = kernel->get_num_vec_rhs();
1602  int32_t num_weights = -1;
1603  int32_t num_kernels = kernel->get_num_subkernels() ;
1604  const float64_t* old_beta = kernel->get_subkernel_weights(num_weights);
1605  ASSERT(num_weights==num_kernels)
1606 
1607  float64_t* w_backup = SG_MALLOC(float64_t, num_kernels);
1608  float64_t* w1 = SG_MALLOC(float64_t, num_kernels);
1609 
1610  // backup and set to one
1611  for (int32_t i=0; i<num_kernels; i++)
1612  {
1613  w_backup[i] = old_beta[i] ;
1614  w1[i]=1.0 ;
1615  }
1616  // set the kernel weights
1617  kernel->set_subkernel_weights(SGVector<float64_t>(w1, num_weights));
1618 
1619  // create normal update (with changed alphas only)
1620  kernel->clear_normal();
1621  for (int32_t ii=0, i=0;(i=working2dnum[ii])>=0;ii++) {
1622  if(a[i] != a_old[i]) {
1623  kernel->add_to_normal(docs[i], (a[i]-a_old[i])*(float64_t)label[i]);
1624  }
1625  }
1626 
1627  if (parallel->get_num_threads() < 2)
1628  {
1629  // determine contributions of different kernels
1630  for (int32_t i=0; i<num; i++)
1631  kernel->compute_by_subkernel(i,&W[i*num_kernels]);
1632  }
1633 #ifdef HAVE_PTHREAD
1634  else
1635  {
1636  pthread_t* threads = SG_MALLOC(pthread_t, parallel->get_num_threads()-1);
1637  S_THREAD_PARAM_SVMLIGHT* params = SG_MALLOC(S_THREAD_PARAM_SVMLIGHT, parallel->get_num_threads()-1);
1638  int32_t step= num/parallel->get_num_threads();
1639 
1640  for (int32_t t=0; t<parallel->get_num_threads()-1; t++)
1641  {
1642  params[t].kernel = kernel;
1643  params[t].W = W;
1644  params[t].start = t*step;
1645  params[t].end = (t+1)*step;
1646  pthread_create(&threads[t], NULL, CSVMLight::update_linear_component_mkl_linadd_helper, (void*)&params[t]);
1647  }
1648 
1649  for (int32_t i=params[parallel->get_num_threads()-2].end; i<num; i++)
1650  kernel->compute_by_subkernel(i,&W[i*num_kernels]);
1651 
1652  for (int32_t t=0; t<parallel->get_num_threads()-1; t++)
1653  pthread_join(threads[t], NULL);
1654 
1655  SG_FREE(params);
1656  SG_FREE(threads);
1657  }
1658 #endif
1659 
1660  // restore old weights
1661  kernel->set_subkernel_weights(SGVector<float64_t>(w_backup,num_weights));
1662 
1663  call_mkl_callback(a, label, lin);
1664 }
1665 
1666 void* CSVMLight::update_linear_component_mkl_linadd_helper(void* p)
1667 {
1668  S_THREAD_PARAM_SVMLIGHT* params = (S_THREAD_PARAM_SVMLIGHT*) p;
1669 
1670  int32_t num_kernels=params->kernel->get_num_subkernels();
1671 
1672  // determine contributions of different kernels
1673  for (int32_t i=params->start; i<params->end; i++)
1674  params->kernel->compute_by_subkernel(i,&(params->W[i*num_kernels]));
1675 
1676  return NULL ;
1677 }
1678 
1679 void CSVMLight::call_mkl_callback(float64_t* a, int32_t* label, float64_t* lin)
1680 {
1681  int32_t num = kernel->get_num_vec_rhs();
1682  int32_t num_kernels = kernel->get_num_subkernels() ;
1683 
1684  float64_t suma=0;
1685  float64_t* sumw=SG_MALLOC(float64_t, num_kernels);
1686 #ifdef HAVE_LAPACK
1687  int nk = (int) num_kernels; /* calling external lib */
1688  double* alphay = SG_MALLOC(double, num);
1689 
1690  for (int32_t i=0; i<num; i++)
1691  {
1692  alphay[i]=a[i]*label[i];
1693  suma+=a[i];
1694  }
1695 
1696  for (int32_t i=0; i<num_kernels; i++)
1697  sumw[i]=0;
1698 
1699  cblas_dgemv(CblasColMajor, CblasNoTrans, num_kernels, (int) num, 0.5, (double*) W,
1700  num_kernels, alphay, 1, 1.0, (double*) sumw, 1);
1701 
1702  SG_FREE(alphay);
1703 #else
1704  for (int32_t i=0; i<num; i++)
1705  suma += a[i];
1706 
1707  for (int32_t d=0; d<num_kernels; d++)
1708  {
1709  sumw[d]=0;
1710  for (int32_t i=0; i<num; i++)
1711  sumw[d] += a[i]*(0.5*label[i]*W[i*num_kernels+d]);
1712  }
1713 #endif
1714 
1715  if (callback)
1716  mkl_converged=callback(mkl, sumw, suma);
1717 
1718 
1719  const float64_t* new_beta = kernel->get_subkernel_weights(num_kernels);
1720 
1721  // update lin
1722 #ifdef HAVE_LAPACK
1723  cblas_dgemv(CblasColMajor, CblasTrans, nk, (int) num, 1.0, (double*) W,
1724  nk, (double*) new_beta, 1, 0.0, (double*) lin, 1);
1725 #else
1726  for (int32_t i=0; i<num; i++)
1727  lin[i]=0 ;
1728  for (int32_t d=0; d<num_kernels; d++)
1729  if (new_beta[d]!=0)
1730  for (int32_t i=0; i<num; i++)
1731  lin[i] += new_beta[d]*W[i*num_kernels+d] ;
1732 #endif
1733 
1734  SG_FREE(sumw);
1735 }
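/* W is stored kernel-major: W[i*num_kernels+d] is the contribution of
   subkernel d to lin[i]. The MKL callback receives suma = sum_i a_i and
   sumw[d] = 1/2 * sum_i a_i*y_i*W[i*num_kernels+d] (the per-kernel
   quadratic terms); afterwards lin = W'*beta is rebuilt from the new
   subkernel weights beta. */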
1736 
1737 
1738 /*************************** Working set selection ***************************/
1739 
1740 int32_t CSVMLight::select_next_qp_subproblem_grad(
1741  int32_t* label, float64_t *a, float64_t *lin, float64_t *c, int32_t totdoc,
1742  int32_t qp_size, int32_t *inconsistent, int32_t *active2dnum,
1743  int32_t *working2dnum, float64_t *selcrit, int32_t *select,
1744  int32_t cache_only, int32_t *key, int32_t *chosen)
1745  /* Use the feasible direction approach to select the next
1746  qp-subproblem (see chapter 'Selecting a good working set'). If
1747  'cache_only' is true, then the variables are selected only among
1748  those for which the kernel evaluations are cached. */
1749 {
1750  int32_t choosenum,i,j,k,activedoc,inum,valid;
1751  float64_t s;
1752 
1753  for (inum=0;working2dnum[inum]>=0;inum++); /* find end of index */
1754  choosenum=0;
1755  activedoc=0;
1756  for (i=0;(j=active2dnum[i])>=0;i++) {
1757  s=-label[j];
1758  if(cache_only)
1759  {
1760  if (use_kernel_cache)
1761  valid=(kernel->kernel_cache_check(j));
1762  else
1763  valid = 1 ;
1764  }
1765  else
1766  valid=1;
1767  if(valid
1768  && (!((a[j]<=(0+learn_parm->epsilon_a)) && (s<0)))
1769  && (!((a[j]>=(learn_parm->svm_cost[j]-learn_parm->epsilon_a))
1770  && (s>0)))
1771  && (!chosen[j])
1772  && (label[j])
1773  && (!inconsistent[j]))
1774  {
1775  selcrit[activedoc]=(float64_t)label[j]*(learn_parm->eps[j]-(float64_t)label[j]*c[j]+(float64_t)label[j]*lin[j]);
1776  key[activedoc]=j;
1777  activedoc++;
1778  }
1779  }
1780  select_top_n(selcrit,activedoc,select,(int32_t)(qp_size/2));
1781  for (k=0;(choosenum<(qp_size/2)) && (k<(qp_size/2)) && (k<activedoc);k++) {
1782  i=key[select[k]];
1783  chosen[i]=1;
1784  working2dnum[inum+choosenum]=i;
1785  choosenum+=1;
1786  if (use_kernel_cache)
1787  kernel->kernel_cache_touch(i);
1788  /* make sure it does not get kicked */
1789  /* out of cache */
1790  }
1791 
1792  activedoc=0;
1793  for (i=0;(j=active2dnum[i])>=0;i++) {
1794  s=label[j];
1795  if(cache_only)
1796  {
1797  if (use_kernel_cache)
1798  valid=(kernel->kernel_cache_check(j));
1799  else
1800  valid = 1 ;
1801  }
1802  else
1803  valid=1;
1804  if(valid
1805  && (!((a[j]<=(0+learn_parm->epsilon_a)) && (s<0)))
1806  && (!((a[j]>=(learn_parm->svm_cost[j]-learn_parm->epsilon_a))
1807  && (s>0)))
1808  && (!chosen[j])
1809  && (label[j])
1810  && (!inconsistent[j]))
1811  {
1812  selcrit[activedoc]=-(float64_t)label[j]*(learn_parm->eps[j]-(float64_t)label[j]*c[j]+(float64_t)label[j]*lin[j]);
1813  /* selcrit[activedoc]=-(float64_t)(label[j]*(-1.0+(float64_t)label[j]*lin[j])); */
1814  key[activedoc]=j;
1815  activedoc++;
1816  }
1817  }
1818  select_top_n(selcrit,activedoc,select,(int32_t)(qp_size/2));
1819  for (k=0;(choosenum<qp_size) && (k<(qp_size/2)) && (k<activedoc);k++) {
1820  i=key[select[k]];
1821  chosen[i]=1;
1822  working2dnum[inum+choosenum]=i;
1823  choosenum+=1;
1824  if (use_kernel_cache)
1825  kernel->kernel_cache_touch(i); /* make sure it does not get kicked */
1826  /* out of cache */
1827  }
1828  working2dnum[inum+choosenum]=-1; /* complete index */
1829  return(choosenum);
1830 }
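/* The selection criterion is selcrit = y_j*g_j, where
   g_j = eps[j] - y_j*c[j] + y_j*lin[j] is the gradient of the objective:
   the first pass picks up to qp_size/2 variables movable in direction
   s = -y_j, the second pass mirrors it with s = +y_j, and with cache_only
   set only variables with cached kernel rows are eligible. */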
1831 
1832 int32_t CSVMLight::select_next_qp_subproblem_rand(
1833  int32_t* label, float64_t *a, float64_t *lin, float64_t *c, int32_t totdoc,
1834  int32_t qp_size, int32_t *inconsistent, int32_t *active2dnum,
1835  int32_t *working2dnum, float64_t *selcrit, int32_t *select, int32_t *key,
1836  int32_t *chosen, int32_t iteration)
1837 /* Use the feasible direction approach to select the next
1838  qp-subproblem (see section 'Selecting a good working set'). Chooses
1839  a feasible direction at (pseudo) random to help jump over numerical
1840  problems. */
1841 {
1842  int32_t choosenum,i,j,k,activedoc,inum;
1843  float64_t s;
1844 
1845  for (inum=0;working2dnum[inum]>=0;inum++); /* find end of index */
1846  choosenum=0;
1847  activedoc=0;
1848  for (i=0;(j=active2dnum[i])>=0;i++) {
1849  s=-label[j];
1850  if((!((a[j]<=(0+learn_parm->epsilon_a)) && (s<0)))
1851  && (!((a[j]>=(learn_parm->svm_cost[j]-learn_parm->epsilon_a))
1852  && (s>0)))
1853  && (!inconsistent[j])
1854  && (label[j])
1855  && (!chosen[j])) {
1856  selcrit[activedoc]=(j+iteration) % totdoc;
1857  key[activedoc]=j;
1858  activedoc++;
1859  }
1860  }
1861  select_top_n(selcrit,activedoc,select,(int32_t)(qp_size/2));
1862  for (k=0;(choosenum<(qp_size/2)) && (k<(qp_size/2)) && (k<activedoc);k++) {
1863  i=key[select[k]];
1864  chosen[i]=1;
1865  working2dnum[inum+choosenum]=i;
1866  choosenum+=1;
1867  if (use_kernel_cache)
1868  kernel->kernel_cache_touch(i); /* make sure it does not get kicked */
1869  /* out of cache */
1870  }
1871 
1872  activedoc=0;
1873  for (i=0;(j=active2dnum[i])>=0;i++) {
1874  s=label[j];
1875  if((!((a[j]<=(0+learn_parm->epsilon_a)) && (s<0)))
1876  && (!((a[j]>=(learn_parm->svm_cost[j]-learn_parm->epsilon_a))
1877  && (s>0)))
1878  && (!inconsistent[j])
1879  && (label[j])
1880  && (!chosen[j])) {
1881  selcrit[activedoc]=(j+iteration) % totdoc;
1882  key[activedoc]=j;
1883  activedoc++;
1884  }
1885  }
1886  select_top_n(selcrit,activedoc,select,(int32_t)(qp_size/2));
1887  for (k=0;(choosenum<qp_size) && (k<(qp_size/2)) && (k<activedoc);k++) {
1888  i=key[select[k]];
1889  chosen[i]=1;
1890  working2dnum[inum+choosenum]=i;
1891  choosenum+=1;
1892  if (use_kernel_cache)
1893  kernel->kernel_cache_touch(i); /* make sure it does not get kicked */
1894  /* out of cache */
1895  }
1896  working2dnum[inum+choosenum]=-1; /* complete index */
1897  return(choosenum);
1898 }
1899 
1900 
1901 
1902 void CSVMLight::select_top_n(
1903  float64_t *selcrit, int32_t range, int32_t* select, int32_t n)
1904 {
1905  register int32_t i,j;
1906 
1907  for (i=0;(i<n) && (i<range);i++) { /* Initialize with the first n elements */
1908  for (j=i;j>=0;j--) {
1909  if((j>0) && (selcrit[select[j-1]]<selcrit[i])){
1910  select[j]=select[j-1];
1911  }
1912  else {
1913  select[j]=i;
1914  j=-1;
1915  }
1916  }
1917  }
1918  if(n>0) {
1919  for (i=n;i<range;i++) {
1920  if(selcrit[i]>selcrit[select[n-1]]) {
1921  for (j=n-1;j>=0;j--) {
1922  if((j>0) && (selcrit[select[j-1]]<selcrit[i])) {
1923  select[j]=select[j-1];
1924  }
1925  else {
1926  select[j]=i;
1927  j=-1;
1928  }
1929  }
1930  }
1931  }
1932  }
1933 }
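/* Worked example (illustrative): for selcrit = {0.2, 0.9, 0.5, 0.7},
   range = 4 and n = 2, the insertion sort above yields select = {1, 3},
   the indices of the two largest criteria in decreasing order. The cost
   is O(range*n), which is cheap for the small n = qp_size/2 used by the
   working-set selection. */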
1934 
1935 
1936 /******************************** Shrinking *********************************/
1937 
1938 void CSVMLight::init_shrink_state(
1939  SHRINK_STATE *shrink_state, int32_t totdoc, int32_t maxhistory)
1940 {
1941  int32_t i;
1942 
1943  shrink_state->deactnum=0;
1944  shrink_state->active = SG_MALLOC(int32_t, totdoc);
1945  shrink_state->inactive_since = SG_MALLOC(int32_t, totdoc);
1946  shrink_state->a_history = SG_MALLOC(float64_t*, maxhistory);
1947  shrink_state->maxhistory=maxhistory;
1948  shrink_state->last_lin = SG_MALLOC(float64_t, totdoc);
1949  shrink_state->last_a = SG_MALLOC(float64_t, totdoc);
1950 
1951  for (i=0;i<totdoc;i++) {
1952  shrink_state->active[i]=1;
1953  shrink_state->inactive_since[i]=0;
1954  shrink_state->last_a[i]=0;
1955  shrink_state->last_lin[i]=0;
1956  }
1957 }
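/* Sketch of the shrink state: active[i] flags examples still in the
   optimization, inactive_since[i] records the value of deactnum at the
   time example i was shrunk away, and a_history can hold up to
   maxhistory alpha snapshots (one per shrinking step), so reactivation
   can replay exactly the alpha changes made since then. */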
1958 
1959 void CSVMLight::shrink_state_cleanup(SHRINK_STATE *shrink_state)
1960 {
1961  SG_FREE(shrink_state->active);
1962  SG_FREE(shrink_state->inactive_since);
1963  if(shrink_state->deactnum > 0)
1964  SG_FREE((shrink_state->a_history[shrink_state->deactnum-1]));
1965  SG_FREE((shrink_state->a_history));
1966  SG_FREE((shrink_state->last_a));
1967  SG_FREE((shrink_state->last_lin));
1968 }
1969 
1970 int32_t CSVMLight::shrink_problem(
1971  SHRINK_STATE *shrink_state, int32_t *active2dnum,
1972  int32_t *last_suboptimal_at, int32_t iteration, int32_t totdoc,
1973  int32_t minshrink, float64_t *a, int32_t *inconsistent, float64_t* c,
1974  float64_t* lin, int* label)
1975  /* Shrink some variables away. Do the shrinking only if at least
1976  minshrink variables can be removed. */
1977 {
1978  int32_t i,ii,change,activenum,lastiter;
1979  float64_t *a_old=NULL;
1980 
1981  activenum=0;
1982  change=0;
1983  for (ii=0;active2dnum[ii]>=0;ii++) {
1984  i=active2dnum[ii];
1985  activenum++;
1986  lastiter=last_suboptimal_at[i];
1987 
1988  if(((iteration-lastiter) > learn_parm->svm_iter_to_shrink)
1989  || (inconsistent[i])) {
1990  change++;
1991  }
1992  }
1993  if((change>=minshrink) /* shrink only if sufficiently many candidates */
1994  && (shrink_state->deactnum<shrink_state->maxhistory)) { /* and enough memory */
1995  /* Shrink problem by removing those variables which are */
1996  /* optimal at a bound for a minimum number of iterations */
1997  if(verbosity>=2) {
1998  SG_INFO(" Shrinking...")
1999  }
2000 
2001  if (!(kernel->has_property(KP_LINADD) && get_linadd_enabled())) { /* non-linear case: save alphas */
2002 
2003  a_old=SG_MALLOC(float64_t, totdoc);
2004  shrink_state->a_history[shrink_state->deactnum]=a_old;
2005  for (i=0;i<totdoc;i++) {
2006  a_old[i]=a[i];
2007  }
2008  }
2009  for (ii=0;active2dnum[ii]>=0;ii++) {
2010  i=active2dnum[ii];
2011  lastiter=last_suboptimal_at[i];
2012  if(((iteration-lastiter) > learn_parm->svm_iter_to_shrink)
2013  || (inconsistent[i])) {
2014  shrink_state->active[i]=0;
2015  shrink_state->inactive_since[i]=shrink_state->deactnum;
2016  }
2017  }
2018  activenum=compute_index(shrink_state->active,totdoc,active2dnum);
2019  shrink_state->deactnum++;
2020  if(kernel->has_property(KP_LINADD) && get_linadd_enabled())
2021  shrink_state->deactnum=0;
2022 
2023  if(verbosity>=2) {
2024  SG_DONE()
2025  SG_DEBUG("Number of inactive variables = %ld\n", totdoc-activenum)
2026  }
2027  }
2028  return(activenum);
2029 }
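/* Sketch of the shrinking rule above: an example is deactivated once it
   has been optimal at a bound for more than svm_iter_to_shrink
   consecutive iterations, or is marked inconsistent. In the non-linear
   case the current alphas are snapshotted into a_history first, so
   reactivate_inactive_examples() can later update lin[] incrementally
   rather than recomputing every kernel row. */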
2030 
2031 void* CSVMLight::reactivate_inactive_examples_linadd_helper(void* p)
2032 {
2033  S_THREAD_PARAM_REACTIVATE_LINADD* params = (S_THREAD_PARAM_REACTIVATE_LINADD*) p;
2034 
2035  CKernel* k = params->kernel;
2036  float64_t* lin = params->lin;
2037  float64_t* last_lin = params->last_lin;
2038  int32_t* active = params->active;
2039  int32_t* docs = params->docs;
2040  int32_t start = params->start;
2041  int32_t end = params->end;
2042 
2043  for (int32_t i=start;i<end;i++)
2044  {
2045  if (!active[i])
2046  lin[i] = last_lin[i]+k->compute_optimized(docs[i]);
2047 
2048  last_lin[i]=lin[i];
2049  }
2050 
2051  return NULL;
2052 }
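/* Sketch: before this helper runs, the caller folds all alpha changes
   into the kernel's normal vector (clear_normal()/add_to_normal()), so
   compute_optimized(docs[i]) returns exactly the change of lin[i] since
   the last_lin snapshot. Each thread can therefore refresh its
   [start,end) slice of shrunk examples without touching shared state. */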
2053 
2054 void* CSVMLight::reactivate_inactive_examples_vanilla_helper(void* p)
2055 {
2056  S_THREAD_PARAM_REACTIVATE_VANILLA* params = (S_THREAD_PARAM_REACTIVATE_VANILLA*) p;
2057  ASSERT(params)
2058  ASSERT(params->kernel)
2059  ASSERT(params->lin)
2060  ASSERT(params->aicache)
2061  ASSERT(params->a)
2062  ASSERT(params->a_old)
2063  ASSERT(params->changed2dnum)
2064  ASSERT(params->inactive2dnum)
2065  ASSERT(params->label)
2066 
2067  CKernel* k = params->kernel;
2068  float64_t* lin = params->lin;
2069  float64_t* aicache = params->aicache;
2070  float64_t* a= params->a;
2071  float64_t* a_old = params->a_old;
2072  int32_t* changed2dnum = params->changed2dnum;
2073  int32_t* inactive2dnum = params->inactive2dnum;
2074  int32_t* label = params->label;
2075  int32_t start = params->start;
2076  int32_t end = params->end;
2077 
2078  for (int32_t ii=start;ii<end;ii++)
2079  {
2080  int32_t i=changed2dnum[ii];
2081  int32_t j=0;
2082  ASSERT(i>=0)
2083 
2084  k->get_kernel_row(i,inactive2dnum,aicache);
2085  for (int32_t jj=0;(j=inactive2dnum[jj])>=0;jj++)
2086  lin[j]+=(a[i]-a_old[i])*aicache[j]*(float64_t)label[i];
2087  }
2088  return NULL;
2089 }
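/* Sketch: without the linadd shortcut, each thread replays the alpha
   updates on a private buffer,
       lin_private[j] += (a[i] - a_old[i]) * label[i] * K(i,j),
   for every changed example i and inactive example j in its slice; the
   caller reduces the per-thread buffers into the shared lin[] after
   joining the threads. */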
2090 
2091 void CSVMLight::reactivate_inactive_examples(
2092  int32_t* label, float64_t *a, SHRINK_STATE *shrink_state, float64_t *lin,
2093  float64_t *c, int32_t totdoc, int32_t iteration, int32_t *inconsistent,
2094  int32_t* docs, float64_t *aicache, float64_t *maxdiff)
2095  /* Make all variables active again that had been removed by
2096  shrinking. */
2097  /* Computes lin for those variables from scratch. */
2098 {
2099  register int32_t i,j,ii,jj,t,*changed2dnum,*inactive2dnum;
2100  int32_t *changed,*inactive;
2101  register float64_t *a_old,dist;
2102  float64_t ex_c,target;
2103 
2104  if (kernel->has_property(KP_LINADD) && get_linadd_enabled()) { /* special linear case */
2105  a_old=shrink_state->last_a;
2106 
2107  if (!get_batch_computation_enabled() || !kernel->has_property(KP_BATCHEVALUATION))
2108  {
2109  SG_DEBUG(" clear normal - linadd\n")
2110  kernel->clear_normal();
2111 
2112  int32_t num_modified=0;
2113  for (i=0;i<totdoc;i++) {
2114  if(a[i] != a_old[i]) {
2115  kernel->add_to_normal(docs[i], ((a[i]-a_old[i])*(float64_t)label[i]));
2116  a_old[i]=a[i];
2117  num_modified++;
2118  }
2119  }
2120 
2121  if (num_modified>0)
2122  {
2123  int32_t num_threads=parallel->get_num_threads();
2124  ASSERT(num_threads>0)
2125  if (num_threads < 2)
2126  {
2127  S_THREAD_PARAM_REACTIVATE_LINADD params;
2128  params.kernel=kernel;
2129  params.lin=lin;
2130  params.last_lin=shrink_state->last_lin;
2131  params.docs=docs;
2132  params.active=shrink_state->active;
2133  params.start=0;
2134  params.end=totdoc;
2135  reactivate_inactive_examples_linadd_helper((void*) &params);
2136  }
2137 #ifdef HAVE_PTHREAD
2138  else
2139  {
2140  pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
2141  S_THREAD_PARAM_REACTIVATE_LINADD* params = SG_MALLOC(S_THREAD_PARAM_REACTIVATE_LINADD, num_threads);
2142  int32_t step= totdoc/num_threads;
2143 
2144  for (t=0; t<num_threads-1; t++)
2145  {
2146  params[t].kernel=kernel;
2147  params[t].lin=lin;
2148  params[t].last_lin=shrink_state->last_lin;
2149  params[t].docs=docs;
2150  params[t].active=shrink_state->active;
2151  params[t].start = t*step;
2152  params[t].end = (t+1)*step;
2153  pthread_create(&threads[t], NULL, CSVMLight::reactivate_inactive_examples_linadd_helper, (void*)&params[t]);
2154  }
2155 
2156  params[t].kernel=kernel;
2157  params[t].lin=lin;
2158  params[t].last_lin=shrink_state->last_lin;
2159  params[t].docs=docs;
2160  params[t].active=shrink_state->active;
2161  params[t].start = t*step;
2162  params[t].end = totdoc;
2163  reactivate_inactive_examples_linadd_helper((void*) &params[t]);
2164 
2165  for (t=0; t<num_threads-1; t++)
2166  pthread_join(threads[t], NULL);
2167 
2168  SG_FREE(threads);
2169  SG_FREE(params);
2170  }
2171 #endif
2172 
2173  }
2174  }
2175  else
2176  {
2177  float64_t *alphas = SG_MALLOC(float64_t, totdoc);
2178  int32_t *idx = SG_MALLOC(int32_t, totdoc);
2179  int32_t num_suppvec=0 ;
2180 
2181  for (i=0; i<totdoc; i++)
2182  {
2183  if(a[i] != a_old[i])
2184  {
2185  alphas[num_suppvec] = (a[i]-a_old[i])*(float64_t)label[i];
2186  idx[num_suppvec] = i ;
2187  a_old[i] = a[i] ;
2188  num_suppvec++ ;
2189  }
2190  }
2191 
2192  if (num_suppvec>0)
2193  {
2194  int32_t num_inactive=0;
2195  int32_t* inactive_idx=SG_MALLOC(int32_t, totdoc); // in fact we only need a subset
2196 
2197  j=0;
2198  for (i=0;i<totdoc;i++)
2199  {
2200  if(!shrink_state->active[i])
2201  {
2202  inactive_idx[j++] = i;
2203  num_inactive++;
2204  }
2205  }
2206 
2207  if (num_inactive>0)
2208  {
2209  float64_t* dest = SG_MALLOC(float64_t, num_inactive);
2210  memset(dest, 0, sizeof(float64_t)*num_inactive);
2211 
2212  kernel->compute_batch(num_inactive, inactive_idx, dest, num_suppvec, idx, alphas);
2213 
2214  j=0;
2215  for (i=0;i<totdoc;i++) {
2216  if(!shrink_state->active[i]) {
2217  lin[i] = shrink_state->last_lin[i] + dest[j++] ;
2218  }
2219  shrink_state->last_lin[i]=lin[i];
2220  }
2221 
2222  SG_FREE(dest);
2223  }
2224  else
2225  {
2226  for (i=0;i<totdoc;i++)
2227  shrink_state->last_lin[i]=lin[i];
2228  }
2229  SG_FREE(inactive_idx);
2230  }
2231  SG_FREE(alphas);
2232  SG_FREE(idx);
2233  }
2234 
2235  kernel->delete_optimization();
2236  }
2237  else
2238  {
2239  changed=SG_MALLOC(int32_t, totdoc);
2240  changed2dnum=SG_MALLOC(int32_t, totdoc+11);
2241  inactive=SG_MALLOC(int32_t, totdoc);
2242  inactive2dnum=SG_MALLOC(int32_t, totdoc+11);
2243  for (t=shrink_state->deactnum-1;(t>=0) && shrink_state->a_history[t];t--)
2244  {
2245  if(verbosity>=2) {
2246  SG_INFO("%ld..",t)
2247  }
2248  a_old=shrink_state->a_history[t];
2249  for (i=0;i<totdoc;i++) {
2250  inactive[i]=((!shrink_state->active[i])
2251  && (shrink_state->inactive_since[i] == t));
2252  changed[i]= (a[i] != a_old[i]);
2253  }
2254  compute_index(inactive,totdoc,inactive2dnum);
2255  compute_index(changed,totdoc,changed2dnum);
2256 
2257 
2258  //TODO: PUT THIS BACK IN!!!!!!!! int32_t num_threads=parallel->get_num_threads();
2259  int32_t num_threads=1;
2260  ASSERT(num_threads>0)
2261 
2262  if (num_threads < 2)
2263  {
2264  for (ii=0;(i=changed2dnum[ii])>=0;ii++) {
2265  kernel->get_kernel_row(i,inactive2dnum,aicache);
2266  for (jj=0;(j=inactive2dnum[jj])>=0;jj++)
2267  lin[j]+=(a[i]-a_old[i])*aicache[j]*(float64_t)label[i];
2268  }
2269  }
2270 #ifdef HAVE_PTHREAD
2271  else
2272  {
2273  //find number of the changed ones
2274  int32_t num_changed=0;
2275  for (ii=0;changed2dnum[ii]>=0;ii++)
2276  num_changed++;
2277 
2278  if (num_changed>0)
2279  {
2280  pthread_t* threads= SG_MALLOC(pthread_t, num_threads-1);
2281  S_THREAD_PARAM_REACTIVATE_VANILLA* params = SG_MALLOC(S_THREAD_PARAM_REACTIVATE_VANILLA, num_threads);
2282  int32_t step= num_changed/num_threads;
2283 
2284  // allocate one set of temporary buffers per thread
2285  float64_t* tmp_lin=SG_MALLOC(float64_t, totdoc*num_threads);
2286  memset(tmp_lin, 0, sizeof(float64_t)*((size_t) totdoc)*num_threads);
2287  float64_t* tmp_aicache=SG_MALLOC(float64_t, totdoc*num_threads);
2288  memset(tmp_aicache, 0, sizeof(float64_t)*((size_t) totdoc)*num_threads);
2289 
2290  int32_t thr;
2291  for (thr=0; thr<num_threads-1; thr++)
2292  {
2293  params[thr].kernel=kernel;
2294  params[thr].lin=&tmp_lin[thr*totdoc];
2295  params[thr].aicache=&tmp_aicache[thr*totdoc];
2296  params[thr].a=a;
2297  params[thr].a_old=a_old;
2298  params[thr].changed2dnum=changed2dnum;
2299  params[thr].inactive2dnum=inactive2dnum;
2300  params[thr].label=label;
2301  params[thr].start = thr*step;
2302  params[thr].end = (thr+1)*step;
2303  pthread_create(&threads[thr], NULL, CSVMLight::reactivate_inactive_examples_vanilla_helper, (void*)&params[thr]);
2304  }
2305 
2306  params[thr].kernel=kernel;
2307  params[thr].lin=&tmp_lin[thr*totdoc];
2308  params[thr].aicache=&tmp_aicache[thr*totdoc];
2309  params[thr].a=a;
2310  params[thr].a_old=a_old;
2311  params[thr].changed2dnum=changed2dnum;
2312  params[thr].inactive2dnum=inactive2dnum;
2313  params[thr].label=label;
2314  params[thr].start = thr*step;
2315  params[thr].end = num_changed;
2316  reactivate_inactive_examples_vanilla_helper((void*) &params[thr]);
2317 
2318  for (jj=0;(j=inactive2dnum[jj])>=0;jj++)
2319  lin[j]+=tmp_lin[totdoc*thr+j];
2320 
2321  for (thr=0; thr<num_threads-1; thr++)
2322  {
2323  pthread_join(threads[thr], NULL);
2324 
2325  //add up results
2326  for (jj=0;(j=inactive2dnum[jj])>=0;jj++)
2327  lin[j]+=tmp_lin[totdoc*thr+j];
2328  }
2329 
2330  SG_FREE(tmp_lin);
2331  SG_FREE(tmp_aicache);
2332  SG_FREE(threads);
2333  SG_FREE(params);
2334  }
2335  }
2336 #endif
2337  }
2338  SG_FREE(changed);
2339  SG_FREE(changed2dnum);
2340  SG_FREE(inactive);
2341  SG_FREE(inactive2dnum);
2342  }
2343 
2344  (*maxdiff)=0;
2345  for (i=0;i<totdoc;i++) {
2346  shrink_state->inactive_since[i]=shrink_state->deactnum-1;
2347  if(!inconsistent[i]) {
2348  dist=(lin[i]-model->b)*(float64_t)label[i];
2349  target=-(learn_parm->eps[i]-(float64_t)label[i]*c[i]);
2350  ex_c=learn_parm->svm_cost[i]-learn_parm->epsilon_a;
2351  if((a[i]>learn_parm->epsilon_a) && (dist > target)) {
2352  if((dist-target)>(*maxdiff)) /* largest violation */
2353  (*maxdiff)=dist-target;
2354  }
2355  else if((a[i]<ex_c) && (dist < target)) {
2356  if((target-dist)>(*maxdiff)) /* largest violation */
2357  (*maxdiff)=target-dist;
2358  }
2359  if((a[i]>(0+learn_parm->epsilon_a))
2360  && (a[i]<ex_c)) {
2361  shrink_state->active[i]=1; /* not at bound */
2362  }
2363  else if((a[i]<=(0+learn_parm->epsilon_a)) && (dist < (target+learn_parm->epsilon_shrink))) {
2364  shrink_state->active[i]=1;
2365  }
2366  else if((a[i]>=ex_c)
2367  && (dist > (target-learn_parm->epsilon_shrink))) {
2368  shrink_state->active[i]=1;
2369  }
2370  else if(learn_parm->sharedslack) { /* make all active when sharedslack */
2371  shrink_state->active[i]=1;
2372  }
2373  }
2374  }
2375 
2376  if (!(kernel->has_property(KP_LINADD) && get_linadd_enabled())) { /* update history for non-linear */
2377  for (i=0;i<totdoc;i++) {
2378  (shrink_state->a_history[shrink_state->deactnum-1])[i]=a[i];
2379  }
2380  for (t=shrink_state->deactnum-2;(t>=0) && shrink_state->a_history[t];t--) {
2381  SG_FREE(shrink_state->a_history[t]);
2382  shrink_state->a_history[t]=0;
2383  }
2384  }
2385 }
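/* Sketch of the re-activation test above, with dist = y_i*(lin[i]-b):
   an example stays active if its alpha lies strictly between the
   bounds, or if it sits at a bound but fails the epsilon_shrink-relaxed
   KT condition (and all examples stay active when slacks are shared).
   *maxdiff returns the largest KT violation found, which the caller
   compares against epsilon_crit to decide whether training is done. */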
2386 
2387 
2388 
2389 /* start the optimizer and return the optimal values */
2390 float64_t* CSVMLight::optimize_qp(
2391  QP *qp, float64_t *epsilon_crit, int32_t nx, float64_t *threshold,
2392  int32_t& svm_maxqpsize)
2393 {
2394  register int32_t i, j, result;
2395  float64_t margin, obj_before, obj_after;
2396  float64_t sigdig, dist, epsilon_loqo;
2397  int32_t iter;
2398 
2399  if(!primal) { /* allocate memory at first call */
2400  primal=SG_MALLOC(float64_t, nx*3);
2401  dual=SG_MALLOC(float64_t, nx*2+1);
2402  }
2403 
2404  obj_before=0; /* calculate objective before optimization */
2405  for(i=0;i<qp->opt_n;i++) {
2406  obj_before+=(qp->opt_g0[i]*qp->opt_xinit[i]);
2407  obj_before+=(0.5*qp->opt_xinit[i]*qp->opt_xinit[i]*qp->opt_g[i*qp->opt_n+i]);
2408  for(j=0;j<i;j++) {
2409  obj_before+=(qp->opt_xinit[j]*qp->opt_xinit[i]*qp->opt_g[j*qp->opt_n+i]);
2410  }
2411  }
2412 
2413  result=STILL_RUNNING;
2414  qp->opt_ce0[0]*=(-1.0);
2415  /* Run pr_loqo. If a run fails, try again with parameters which lead */
2416  /* to a slower, but more robust setting. */
2417  for(margin=init_margin,iter=init_iter;
2418  (margin<=0.9999999) && (result!=OPTIMAL_SOLUTION);) {
2419 
2421  sigdig=-log10(opt_precision);
2422 
2423  result=pr_loqo((int32_t)qp->opt_n,(int32_t)qp->opt_m,
2424  (float64_t *)qp->opt_g0,(float64_t *)qp->opt_g,
2425  (float64_t *)qp->opt_ce,(float64_t *)qp->opt_ce0,
2426  (float64_t *)qp->opt_low,(float64_t *)qp->opt_up,
2427  (float64_t *)primal,(float64_t *)dual,
2428  (int32_t)(verbosity-2),
2429  (float64_t)sigdig,(int32_t)iter,
2430  (float64_t)margin,(float64_t)(qp->opt_up[0])/4.0,(int32_t)0);
2431 
2432  if(CMath::is_nan(dual[0])) { /* check for choldc problem */
2433  if(verbosity>=2) {
2434  SG_SDEBUG("Restarting PR_LOQO with more conservative parameters.\n")
2435  }
2436  if(init_margin<0.80) { /* become more conservative in general */
2437  init_margin=(4.0*margin+1.0)/5.0;
2438  }
2439  margin=(margin+1.0)/2.0;
2440  (opt_precision)*=10.0; /* reduce precision */
2441  if(verbosity>=2) {
2442  SG_SDEBUG("Reducing precision of PR_LOQO.\n")
2443  }
2444  }
2445  else if(result!=OPTIMAL_SOLUTION) {
2446  iter+=2000;
2447  init_iter+=10;
2448  (opt_precision)*=10.0; /* reduce precision */
2449  if(verbosity>=2) {
2450  SG_SDEBUG("Reducing precision of PR_LOQO due to (%ld).\n",result)
2451  }
2452  }
2453  }
2454 
2455  if(qp->opt_m) /* Thanks to Alex Smola for this hint */
2456  model_b=dual[0];
2457  else
2458  model_b=0;
2459 
2460  /* Check the precision of the alphas. If the results of the current */
2461  /* optimization violate the KT conditions, relax the epsilon on the bounds of the alphas. */
2462  epsilon_loqo=1E-10;
2463  for(i=0;i<qp->opt_n;i++) {
2464  dist=-model_b*qp->opt_ce[i];
2465  dist+=(qp->opt_g0[i]+1.0);
2466  for(j=0;j<i;j++) {
2467  dist+=(primal[j]*qp->opt_g[j*qp->opt_n+i]);
2468  }
2469  for(j=i;j<qp->opt_n;j++) {
2470  dist+=(primal[j]*qp->opt_g[i*qp->opt_n+j]);
2471  }
2472  /* SG_SDEBUG("LOQO: a[%d]=%f, dist=%f, b=%f\n",i,primal[i],dist,dual[0]) */
2473  if((primal[i]<(qp->opt_up[i]-epsilon_loqo)) && (dist < (1.0-(*epsilon_crit)))) {
2474  epsilon_loqo=(qp->opt_up[i]-primal[i])*2.0;
2475  }
2476  else if((primal[i]>(0+epsilon_loqo)) && (dist > (1.0+(*epsilon_crit)))) {
2477  epsilon_loqo=primal[i]*2.0;
2478  }
2479  }
2480 
2481  for(i=0;i<qp->opt_n;i++) { /* clip alphas to bounds */
2482  if(primal[i]<=(0+epsilon_loqo)) {
2483  primal[i]=0;
2484  }
2485  else if(primal[i]>=(qp->opt_up[i]-epsilon_loqo)) {
2486  primal[i]=qp->opt_up[i];
2487  }
2488  }
2489 
2490  obj_after=0; /* calculate objective after optimization */
2491  for(i=0;i<qp->opt_n;i++) {
2492  obj_after+=(qp->opt_g0[i]*primal[i]);
2493  obj_after+=(0.5*primal[i]*primal[i]*qp->opt_g[i*qp->opt_n+i]);
2494  for(j=0;j<i;j++) {
2495  obj_after+=(primal[j]*primal[i]*qp->opt_g[j*qp->opt_n+i]);
2496  }
2497  }
2498 
2499  /* if the optimizer returned NaN values, reset and retry with a */
2500  /* smaller working set. */
2501  if(CMath::is_nan(obj_after) || CMath::is_nan(model_b)) {
2502  for(i=0;i<qp->opt_n;i++) {
2503  primal[i]=qp->opt_xinit[i];
2504  }
2505  model_b=0;
2506  if(svm_maxqpsize>2) {
2507  svm_maxqpsize--; /* decrease size of qp-subproblems */
2508  }
2509  }
2510 
2511  if(obj_after >= obj_before) { /* check whether there was progress */
2512  (opt_precision)/=100.0;
2513  precision_violations++;
2514  if(verbosity>=2) {
2515  SG_SDEBUG("Increasing Precision of PR_LOQO.\n")
2516  }
2517  }
2518 
2519  // TODO: add parameter for this (e.g. for MTL experiments)
2520  if(precision_violations > 5000) {
2521  (*epsilon_crit)*=10.0;
2522  precision_violations=0;
2523  SG_SINFO("Relaxing epsilon on KT-Conditions.\n")
2524  }
2525 
2526  (*threshold)=model_b;
2527 
2528  if(result!=OPTIMAL_SOLUTION) {
2529  SG_SERROR("PR_LOQO did not converge.\n")
2530  return(qp->opt_xinit);
2531  }
2532  else {
2533  return(primal);
2534  }
2535 }
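/* Sketch of the retry strategy above: pr_loqo is re-run with the margin
   pushed towards 1 and a 10x coarser precision whenever it reports NaN
   (Cholesky trouble) or a non-optimal status. If the re-optimized
   objective still fails to improve, the requested precision is
   tightened 100-fold and, after too many such violations, the KT
   epsilon is relaxed; NaN results additionally shrink svm_maxqpsize so
   the next qp-subproblem is easier to solve. */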
2536 
2537 
2538 #endif //USE_SVMLIGHT
