SHOGUN  4.2.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
WDSVMOcas.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2007-2008 Vojtech Franc
8  * Written (W) 2007-2009 Soeren Sonnenburg
9  * Copyright (C) 2007-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
13 #ifdef USE_GPL_SHOGUN
14 
15 #include <shogun/labels/Labels.h>
18 #include <shogun/lib/Time.h>
19 #include <shogun/base/Parallel.h>
20 #include <shogun/machine/Machine.h>
21 #include <shogun/lib/external/libocas.h>
24 #include <shogun/labels/Labels.h>
26 
27 using namespace shogun;
28 
29 #ifndef DOXYGEN_SHOULD_SKIP_THIS
/** Per-thread work descriptor for CWDSVMOcas::compute_output_helper().
 * Each thread scores the [start,end) range of examples; the buffers are
 * shared across threads but written at disjoint example indices.
 */
struct wdocas_thread_params_output
{
	/* per-example raw WD score scratch (float32), accumulated by the thread */
	float32_t* out;
	/* per-example running k-mer index scratch buffer */
	int32_t* val;
	/* final per-example outputs: y[i]*bias + out[i]*y[i]/normalization_const */
	float64_t* output;
	/* machine instance the thread operates on */
	CWDSVMOcas* wdocas;
	/* first example index (inclusive) handled by this thread */
	int32_t start;
	/* last example index (exclusive) handled by this thread */
	int32_t end;
};
39 
/** Per-thread work descriptor for CWDSVMOcas::add_new_cut_helper().
 * Each thread accumulates into the shared new_a vector over a disjoint
 * [start,end) range of string positions (position j only touches the
 * block starting at w_dim_single_char*j, so no locking is needed).
 */
struct wdocas_thread_params_add
{
	/* machine instance the thread operates on */
	CWDSVMOcas* wdocas;
	/* shared accumulator holding the new cutting plane */
	float32_t* new_a;
	/* indices of the examples selected for the new cut */
	uint32_t* new_cut;
	/* first string position (inclusive) handled by this thread */
	int32_t start;
	/* last string position (exclusive) handled by this thread */
	int32_t end;
	/* number of entries in new_cut */
	uint32_t cut_length;
};
49 #endif // DOXYGEN_SHOULD_SKIP_THIS
50 
51 CWDSVMOcas::CWDSVMOcas()
52 : CMachine(), use_bias(false), bufsize(3000), C1(1), C2(1),
53  epsilon(1e-3), method(SVM_OCAS)
54 {
55  SG_UNSTABLE("CWDSVMOcas::CWDSVMOcas()", "\n")
56 
57  w=NULL;
58  old_w=NULL;
59  features=NULL;
60  degree=6;
61  from_degree=40;
62  wd_weights=NULL;
63  w_offsets=NULL;
65 }
66 
67 CWDSVMOcas::CWDSVMOcas(E_SVM_TYPE type)
68 : CMachine(), use_bias(false), bufsize(3000), C1(1), C2(1),
69  epsilon(1e-3), method(type)
70 {
71  w=NULL;
72  old_w=NULL;
73  features=NULL;
74  degree=6;
75  from_degree=40;
76  wd_weights=NULL;
77  w_offsets=NULL;
79 }
80 
81 CWDSVMOcas::CWDSVMOcas(
82  float64_t C, int32_t d, int32_t from_d, CStringFeatures<uint8_t>* traindat,
83  CLabels* trainlab)
84 : CMachine(), use_bias(false), bufsize(3000), C1(C), C2(C), epsilon(1e-3),
85  degree(d), from_degree(from_d)
86 {
87  w=NULL;
88  old_w=NULL;
89  method=SVM_OCAS;
90  features=traindat;
91  set_labels(trainlab);
92  wd_weights=NULL;
93  w_offsets=NULL;
95 }
96 
97 
/** Destructor.
 *
 * NOTE(review): heap-allocated members set up here and in train_machine()
 * (w, old_w, wd_weights, w_offsets, cp_bias) are not released in this
 * destructor — presumably freed elsewhere or leaked; verify against the
 * class definition and base class.
 */
CWDSVMOcas::~CWDSVMOcas()
{
}
101 
102 CBinaryLabels* CWDSVMOcas::apply_binary(CFeatures* data)
103 {
104  SGVector<float64_t> outputs = apply_get_outputs(data);
105  return new CBinaryLabels(outputs);
106 }
107 
108 CRegressionLabels* CWDSVMOcas::apply_regression(CFeatures* data)
109 {
110  SGVector<float64_t> outputs = apply_get_outputs(data);
111  return new CRegressionLabels(outputs);
112 }
113 
114 SGVector<float64_t> CWDSVMOcas::apply_get_outputs(CFeatures* data)
115 {
116  if (data)
117  {
118  if (data->get_feature_class() != C_STRING ||
119  data->get_feature_type() != F_BYTE)
120  {
121  SG_ERROR("Features not of class string type byte\n")
122  }
123 
124  set_features((CStringFeatures<uint8_t>*) data);
125  }
126  ASSERT(features)
127 
128  set_wd_weights();
129  set_normalization_const();
130 
131  SGVector<float64_t> outputs;
132  if (features)
133  {
134  int32_t num=features->get_num_vectors();
135  ASSERT(num>0)
136 
137  outputs = SGVector<float64_t>(num);
138 
139  for (int32_t i=0; i<num; i++)
140  outputs[i] = apply_one(i);
141  }
142 
143  return outputs;
144 }
145 
146 int32_t CWDSVMOcas::set_wd_weights()
147 {
148  ASSERT(degree>0 && degree<=8)
149  SG_FREE(wd_weights);
150  wd_weights=SG_MALLOC(float32_t, degree);
151  SG_FREE(w_offsets);
152  w_offsets=SG_MALLOC(int32_t, degree);
153  int32_t w_dim_single_c=0;
154 
155  for (int32_t i=0; i<degree; i++)
156  {
157  w_offsets[i]=CMath::pow(alphabet_size, i+1);
158  wd_weights[i]=sqrt(2.0*(from_degree-i)/(from_degree*(from_degree+1)));
159  w_dim_single_c+=w_offsets[i];
160  }
161  return w_dim_single_c;
162 }
163 
164 bool CWDSVMOcas::train_machine(CFeatures* data)
165 {
166  SG_INFO("C=%f, epsilon=%f, bufsize=%d\n", get_C1(), get_epsilon(), bufsize)
167 
168  ASSERT(m_labels)
169  ASSERT(m_labels->get_label_type() == LT_BINARY)
170  if (data)
171  {
172  if (data->get_feature_class() != C_STRING ||
173  data->get_feature_type() != F_BYTE)
174  {
175  SG_ERROR("Features not of class string type byte\n")
176  }
177  set_features((CStringFeatures<uint8_t>*) data);
178  }
179 
180  ASSERT(get_features())
181  CAlphabet* alphabet=get_features()->get_alphabet();
182  ASSERT(alphabet && alphabet->get_alphabet()==RAWDNA)
183 
184  alphabet_size=alphabet->get_num_symbols();
185  string_length=features->get_num_vectors();
186  SGVector<float64_t> labvec=((CBinaryLabels*) m_labels)->get_labels();
187  lab=labvec.vector;
188 
189  w_dim_single_char=set_wd_weights();
190  //CMath::display_vector(wd_weights, degree, "wd_weights");
191  SG_DEBUG("w_dim_single_char=%d\n", w_dim_single_char)
192  w_dim=string_length*w_dim_single_char;
193  SG_DEBUG("cutting plane has %d dims\n", w_dim)
194  num_vec=get_features()->get_max_vector_length();
195 
196  set_normalization_const();
197  SG_INFO("num_vec: %d num_lab: %d\n", num_vec, labvec.vlen)
198  ASSERT(num_vec==labvec.vlen)
199  ASSERT(num_vec>0)
200 
201  SG_FREE(w);
202  w=SG_MALLOC(float32_t, w_dim);
203  memset(w, 0, w_dim*sizeof(float32_t));
204 
205  SG_FREE(old_w);
206  old_w=SG_MALLOC(float32_t, w_dim);
207  memset(old_w, 0, w_dim*sizeof(float32_t));
208  bias=0;
209  old_bias=0;
210 
211  cuts=SG_MALLOC(float32_t*, bufsize);
212  memset(cuts, 0, sizeof(*cuts)*bufsize);
213  cp_bias=SG_MALLOC(float64_t, bufsize);
214  memset(cp_bias, 0, sizeof(float64_t)*bufsize);
215 
217  /*float64_t* tmp = SG_MALLOC(float64_t, num_vec);
218  float64_t start=CTime::get_curtime();
219  CMath::random_vector(w, w_dim, (float32_t) 0, (float32_t) 1000);
220  compute_output(tmp, this);
221  start=CTime::get_curtime()-start;
222  SG_PRINT("timing:%f\n", start)
223  SG_FREE(tmp);
224  exit(1);*/
226  float64_t TolAbs=0;
227  float64_t QPBound=0;
228  uint8_t Method=0;
229  if (method == SVM_OCAS)
230  Method = 1;
231  ocas_return_value_T result = svm_ocas_solver( get_C1(), num_vec, get_epsilon(),
232  TolAbs, QPBound, get_max_train_time(), bufsize, Method,
233  &CWDSVMOcas::compute_W,
234  &CWDSVMOcas::update_W,
235  &CWDSVMOcas::add_new_cut,
236  &CWDSVMOcas::compute_output,
237  &CWDSVMOcas::sort,
238  &CWDSVMOcas::print,
239  this);
240 
241  SG_INFO("Ocas Converged after %d iterations\n"
242  "==================================\n"
243  "timing statistics:\n"
244  "output_time: %f s\n"
245  "sort_time: %f s\n"
246  "add_time: %f s\n"
247  "w_time: %f s\n"
248  "solver_time %f s\n"
249  "ocas_time %f s\n\n", result.nIter, result.output_time, result.sort_time,
250  result.add_time, result.w_time, result.qp_solver_time, result.ocas_time);
251 
252  for (int32_t i=bufsize-1; i>=0; i--)
253  SG_FREE(cuts[i]);
254  SG_FREE(cuts);
255 
256  lab=NULL;
257  SG_UNREF(alphabet);
258 
259  return true;
260 }
261 
262 /*----------------------------------------------------------------------------------
263  sq_norm_W = sparse_update_W( t ) does the following:
264 
265  W = oldW*(1-t) + t*W;
266  sq_norm_W = W'*W;
267 
268  ---------------------------------------------------------------------------------*/
269 float64_t CWDSVMOcas::update_W( float64_t t, void* ptr )
270 {
271  float64_t sq_norm_W = 0;
272  CWDSVMOcas* o = (CWDSVMOcas*) ptr;
273  uint32_t nDim = (uint32_t) o->w_dim;
274  float32_t* W=o->w;
275  float32_t* oldW=o->old_w;
276  float64_t bias=o->bias;
277  float64_t old_bias=bias;
278 
279  for(uint32_t j=0; j <nDim; j++)
280  {
281  W[j] = oldW[j]*(1-t) + t*W[j];
282  sq_norm_W += W[j]*W[j];
283  }
284 
285  bias=old_bias*(1-t) + t*bias;
286  sq_norm_W += CMath::sq(bias);
287 
288  o->bias=bias;
289  o->old_bias=old_bias;
290 
291  return( sq_norm_W );
292 }
293 
294 /*----------------------------------------------------------------------------------
295  sparse_add_new_cut( new_col_H, new_cut, cut_length, nSel ) does the following:
296 
297  new_a = sum(data_X(:,find(new_cut ~=0 )),2);
298  new_col_H = [sparse_A(:,1:nSel)'*new_a ; new_a'*new_a];
299  sparse_A(:,nSel+1) = new_a;
300 
301  ---------------------------------------------------------------------------------*/
302 void* CWDSVMOcas::add_new_cut_helper( void* ptr)
303 {
304  wdocas_thread_params_add* p = (wdocas_thread_params_add*) ptr;
305  CWDSVMOcas* o = p->wdocas;
306  int32_t start = p->start;
307  int32_t end = p->end;
308  int32_t string_length = o->string_length;
309  //uint32_t nDim=(uint32_t) o->w_dim;
310  uint32_t cut_length=p->cut_length;
311  uint32_t* new_cut=p->new_cut;
312  int32_t* w_offsets = o->w_offsets;
313  float64_t* y = o->lab;
314  int32_t alphabet_size = o->alphabet_size;
315  float32_t* wd_weights = o->wd_weights;
316  int32_t degree = o->degree;
318  float64_t normalization_const = o->normalization_const;
319 
320  // temporary vector
321  float32_t* new_a = p->new_a;
322  //float32_t* new_a = SG_MALLOC(float32_t, nDim);
323  //memset(new_a, 0, sizeof(float32_t)*nDim);
324 
325  int32_t* val=SG_MALLOC(int32_t, cut_length);
326  for (int32_t j=start; j<end; j++)
327  {
328  int32_t offs=o->w_dim_single_char*j;
329  memset(val,0,sizeof(int32_t)*cut_length);
330  int32_t lim=CMath::min(degree, string_length-j);
331  int32_t len;
332 
333  for (int32_t k=0; k<lim; k++)
334  {
335  bool free_vec;
336  uint8_t* vec = f->get_feature_vector(j+k, len, free_vec);
337  float32_t wd = wd_weights[k]/normalization_const;
338 
339  for(uint32_t i=0; i < cut_length; i++)
340  {
341  val[i]=val[i]*alphabet_size + vec[new_cut[i]];
342  new_a[offs+val[i]]+=wd * y[new_cut[i]];
343  }
344  offs+=w_offsets[k];
345  f->free_feature_vector(vec, j+k, free_vec);
346  }
347  }
348 
349  //p->new_a=new_a;
350  SG_FREE(val);
351  return NULL;
352 }
353 
354 int CWDSVMOcas::add_new_cut(
355  float64_t *new_col_H, uint32_t *new_cut, uint32_t cut_length,
356  uint32_t nSel, void* ptr)
357 {
358  CWDSVMOcas* o = (CWDSVMOcas*) ptr;
359  uint32_t i;
360  float64_t* c_bias = o->cp_bias;
361  uint32_t nDim=(uint32_t) o->w_dim;
362  float32_t** cuts=o->cuts;
363  float32_t* new_a=SG_MALLOC(float32_t, nDim);
364  memset(new_a, 0, sizeof(float32_t)*nDim);
365 #ifdef HAVE_PTHREAD
366 
367  wdocas_thread_params_add* params_add=SG_MALLOC(wdocas_thread_params_add, o->parallel->get_num_threads());
368  pthread_t* threads=SG_MALLOC(pthread_t, o->parallel->get_num_threads());
369 
370  int32_t string_length = o->string_length;
371  int32_t t;
372  int32_t nthreads=o->parallel->get_num_threads()-1;
373  int32_t step= string_length/o->parallel->get_num_threads();
374 
375  if (step<1)
376  {
377  nthreads=string_length-1;
378  step=1;
379  }
380 
381  for (t=0; t<nthreads; t++)
382  {
383  params_add[t].wdocas=o;
384  //params_add[t].new_a=NULL;
385  params_add[t].new_a=new_a;
386  params_add[t].new_cut=new_cut;
387  params_add[t].start = step*t;
388  params_add[t].end = step*(t+1);
389  params_add[t].cut_length = cut_length;
390 
391  if (pthread_create(&threads[t], NULL, &CWDSVMOcas::add_new_cut_helper, (void*)&params_add[t]) != 0)
392  {
393  nthreads=t;
394  SG_SWARNING("thread creation failed\n")
395  break;
396  }
397  }
398 
399  params_add[t].wdocas=o;
400  //params_add[t].new_a=NULL;
401  params_add[t].new_a=new_a;
402  params_add[t].new_cut=new_cut;
403  params_add[t].start = step*t;
404  params_add[t].end = string_length;
405  params_add[t].cut_length = cut_length;
406  add_new_cut_helper(&params_add[t]);
407  //float32_t* new_a=params_add[t].new_a;
408 
409  for (t=0; t<nthreads; t++)
410  {
411  if (pthread_join(threads[t], NULL) != 0)
412  SG_SWARNING("pthread_join failed\n")
413 
414  //float32_t* a=params_add[t].new_a;
415  //for (i=0; i<nDim; i++)
416  // new_a[i]+=a[i];
417  //SG_FREE(a);
418  }
419  SG_FREE(threads);
420  SG_FREE(params_add);
421 #endif /* HAVE_PTHREAD */
422  for(i=0; i < cut_length; i++)
423  {
424  if (o->use_bias)
425  c_bias[nSel]+=o->lab[new_cut[i]];
426  }
427 
428  // insert new_a into the last column of sparse_A
429  for(i=0; i < nSel; i++)
430  new_col_H[i] = CMath::dot(new_a, cuts[i], nDim) + c_bias[nSel]*c_bias[i];
431  new_col_H[nSel] = CMath::dot(new_a, new_a, nDim) + CMath::sq(c_bias[nSel]);
432 
433  cuts[nSel]=new_a;
434  //CMath::display_vector(new_col_H, nSel+1, "new_col_H");
435  //CMath::display_vector(cuts[nSel], nDim, "cut[nSel]");
436  //
437 
438  return 0;
439 }
440 
/** OCAS sorting callback.
 *
 * Sorts vals in place and permutes data identically (index-tracking
 * quicksort), as required by the libocas solver interface.
 *
 * @param vals keys to sort
 * @param data payload permuted alongside vals
 * @param size number of elements
 * @return always 0
 */
int CWDSVMOcas::sort( float64_t* vals, float64_t* data, uint32_t size)
{
	CMath::qsort_index(vals, data, size);
	return 0;
}
446 
447 /*----------------------------------------------------------------------
448  sparse_compute_output( output ) does the follwing:
449 
450  output = data_X'*W;
451  ----------------------------------------------------------------------*/
452 void* CWDSVMOcas::compute_output_helper(void* ptr)
453 {
454  wdocas_thread_params_output* p = (wdocas_thread_params_output*) ptr;
455  CWDSVMOcas* o = p->wdocas;
456  int32_t start = p->start;
457  int32_t end = p->end;
458  float32_t* out = p->out;
459  float64_t* output = p->output;
460  int32_t* val = p->val;
461 
463 
464  int32_t degree = o->degree;
465  int32_t string_length = o->string_length;
466  int32_t alphabet_size = o->alphabet_size;
467  int32_t* w_offsets = o->w_offsets;
468  float32_t* wd_weights = o->wd_weights;
469  float32_t* w= o->w;
470 
471  float64_t* y = o->lab;
472  float64_t normalization_const = o->normalization_const;
473 
474 
475  for (int32_t j=0; j<string_length; j++)
476  {
477  int32_t offs=o->w_dim_single_char*j;
478  for (int32_t i=start ; i<end; i++)
479  val[i]=0;
480 
481  int32_t lim=CMath::min(degree, string_length-j);
482  int32_t len;
483 
484  for (int32_t k=0; k<lim; k++)
485  {
486  bool free_vec;
487  uint8_t* vec=f->get_feature_vector(j+k, len, free_vec);
488  float32_t wd = wd_weights[k];
489 
490  for (int32_t i=start; i<end; i++) // quite fast 1.9s
491  {
492  val[i]=val[i]*alphabet_size + vec[i];
493  out[i]+=wd*w[offs+val[i]];
494  }
495 
496  /*for (int32_t i=0; i<nData/4; i++) // slowest 2s
497  {
498  uint32_t x=((uint32_t*) vec)[i];
499  int32_t ii=4*i;
500  val[ii]=val[ii]*alphabet_size + (x&255);
501  val[ii+1]=val[ii+1]*alphabet_size + ((x>>8)&255);
502  val[ii+2]=val[ii+2]*alphabet_size + ((x>>16)&255);
503  val[ii+3]=val[ii+3]*alphabet_size + (x>>24);
504  out[ii]+=wd*w[offs+val[ii]];
505  out[ii+1]+=wd*w[offs+val[ii+1]];
506  out[ii+2]+=wd*w[offs+val[ii+2]];
507  out[ii+3]+=wd*w[offs+val[ii+3]];
508  }*/
509 
510  /*for (int32_t i=0; i<nData>>3; i++) // fastest on 64bit: 1.5s
511  {
512  uint64_t x=((uint64_t*) vec)[i];
513  int32_t ii=i<<3;
514  val[ii]=val[ii]*alphabet_size + (x&255);
515  val[ii+1]=val[ii+1]*alphabet_size + ((x>>8)&255);
516  val[ii+2]=val[ii+2]*alphabet_size + ((x>>16)&255);
517  val[ii+3]=val[ii+3]*alphabet_size + ((x>>24)&255);
518  val[ii+4]=val[ii+4]*alphabet_size + ((x>>32)&255);
519  val[ii+5]=val[ii+5]*alphabet_size + ((x>>40)&255);
520  val[ii+6]=val[ii+6]*alphabet_size + ((x>>48)&255);
521  val[ii+7]=val[ii+7]*alphabet_size + (x>>56);
522  out[ii]+=wd*w[offs+val[ii]];
523  out[ii+1]+=wd*w[offs+val[ii+1]];
524  out[ii+2]+=wd*w[offs+val[ii+2]];
525  out[ii+3]+=wd*w[offs+val[ii+3]];
526  out[ii+4]+=wd*w[offs+val[ii+4]];
527  out[ii+5]+=wd*w[offs+val[ii+5]];
528  out[ii+6]+=wd*w[offs+val[ii+6]];
529  out[ii+7]+=wd*w[offs+val[ii+7]];
530  }*/
531  offs+=w_offsets[k];
532  f->free_feature_vector(vec, j+k, free_vec);
533  }
534  }
535 
536  for (int32_t i=start; i<end; i++)
537  output[i]=y[i]*o->bias + out[i]*y[i]/normalization_const;
538 
539  //CMath::display_vector(o->w, o->w_dim, "w");
540  //CMath::display_vector(output, nData, "out");
541  return NULL;
542 }
543 
544 int CWDSVMOcas::compute_output( float64_t *output, void* ptr )
545 {
546 #ifdef HAVE_PTHREAD
547  CWDSVMOcas* o = (CWDSVMOcas*) ptr;
548  int32_t nData=o->num_vec;
549  wdocas_thread_params_output* params_output=SG_MALLOC(wdocas_thread_params_output, o->parallel->get_num_threads());
550  pthread_t* threads = SG_MALLOC(pthread_t, o->parallel->get_num_threads());
551 
552  float32_t* out=SG_MALLOC(float32_t, nData);
553  int32_t* val=SG_MALLOC(int32_t, nData);
554  memset(out, 0, sizeof(float32_t)*nData);
555 
556  int32_t t;
557  int32_t nthreads=o->parallel->get_num_threads()-1;
558  int32_t step= nData/o->parallel->get_num_threads();
559 
560  if (step<1)
561  {
562  nthreads=nData-1;
563  step=1;
564  }
565 
566  for (t=0; t<nthreads; t++)
567  {
568  params_output[t].wdocas=o;
569  params_output[t].output=output;
570  params_output[t].out=out;
571  params_output[t].val=val;
572  params_output[t].start = step*t;
573  params_output[t].end = step*(t+1);
574 
575  //SG_SPRINT("t=%d start=%d end=%d output=%p\n", t, params_output[t].start, params_output[t].end, params_output[t].output)
576  if (pthread_create(&threads[t], NULL, &CWDSVMOcas::compute_output_helper, (void*)&params_output[t]) != 0)
577  {
578  nthreads=t;
579  SG_SWARNING("thread creation failed\n")
580  break;
581  }
582  }
583 
584  params_output[t].wdocas=o;
585  params_output[t].output=output;
586  params_output[t].out=out;
587  params_output[t].val=val;
588  params_output[t].start = step*t;
589  params_output[t].end = nData;
590  compute_output_helper(&params_output[t]);
591  //SG_SPRINT("t=%d start=%d end=%d output=%p\n", t, params_output[t].start, params_output[t].end, params_output[t].output)
592 
593  for (t=0; t<nthreads; t++)
594  {
595  if (pthread_join(threads[t], NULL) != 0)
596  SG_SWARNING("pthread_join failed\n")
597  }
598  SG_FREE(threads);
599  SG_FREE(params_output);
600  SG_FREE(val);
601  SG_FREE(out);
602 #endif /* HAVE_PTHREAD */
603  return 0;
604 }
605 /*----------------------------------------------------------------------
606  sq_norm_W = compute_W( alpha, nSel ) does the following:
607 
608  oldW = W;
609  W = sparse_A(:,1:nSel)'*alpha;
610  sq_norm_W = W'*W;
611  dp_WoldW = W'*oldW';
612 
613  ----------------------------------------------------------------------*/
614 void CWDSVMOcas::compute_W(
615  float64_t *sq_norm_W, float64_t *dp_WoldW, float64_t *alpha, uint32_t nSel,
616  void* ptr)
617 {
618  CWDSVMOcas* o = (CWDSVMOcas*) ptr;
619  uint32_t nDim= (uint32_t) o->w_dim;
620  CMath::swap(o->w, o->old_w);
621  float32_t* W=o->w;
622  float32_t* oldW=o->old_w;
623  float32_t** cuts=o->cuts;
624  memset(W, 0, sizeof(float32_t)*nDim);
625  float64_t* c_bias = o->cp_bias;
626  float64_t old_bias=o->bias;
627  float64_t bias=0;
628 
629  for (uint32_t i=0; i<nSel; i++)
630  {
631  if (alpha[i] > 0)
632  SGVector<float32_t>::vec1_plus_scalar_times_vec2(W, (float32_t) alpha[i], cuts[i], nDim);
633 
634  bias += c_bias[i]*alpha[i];
635  }
636 
637  *sq_norm_W = CMath::dot(W,W, nDim) +CMath::sq(bias);
638  *dp_WoldW = CMath::dot(W,oldW, nDim) + bias*old_bias;;
639  //SG_PRINT("nSel=%d sq_norm_W=%f dp_WoldW=%f\n", nSel, *sq_norm_W, *dp_WoldW)
640 
641  o->bias = bias;
642  o->old_bias = old_bias;
643 }
644 
645 #endif //USE_GPL_SHOGUN
SGVector< ST > get_feature_vector(int32_t num)
#define SG_INFO(...)
Definition: SGIO.h:118
binary labels +1/-1
Definition: LabelTypes.h:18
Real Labels are real-valued labels.
RAWDNA - letters 0,1,2,3.
Definition: Alphabet.h:29
The class Labels models labels, i.e. class assignments of objects.
Definition: Labels.h:43
static void qsort_index(T1 *output, T2 *index, uint32_t size)
Definition: Math.h:2202
SGString< ST > * features
#define SG_SWARNING(...)
Definition: SGIO.h:178
static T sq(T x)
Definition: Math.h:450
#define SG_ERROR(...)
Definition: SGIO.h:129
The class Alphabet implements an alphabet and alphabet utility functions.
Definition: Alphabet.h:91
void free_feature_vector(ST *feat_vec, int32_t num, bool dofree)
A generic learning machine interface.
Definition: Machine.h:143
float64_t * wd_weights
Definition: WDFeatures.h:209
#define ASSERT(x)
Definition: SGIO.h:201
void print(CJLCoverTreePoint &p)
double float64_t
Definition: common.h:50
virtual EFeatureClass get_feature_class() const =0
float64_t normalization_const
Definition: WDFeatures.h:212
static float64_t dot(const bool *v1, const bool *v2, int32_t n)
Compute dot product between v1 and v2 (blas optimized)
Definition: Math.h:627
static void vec1_plus_scalar_times_vec2(T *vec1, const T scalar, const T *vec2, int32_t n)
x=x+alpha*y
Definition: SGVector.cpp:529
float float32_t
Definition: common.h:49
#define SG_UNREF(x)
Definition: SGObject.h:55
#define SG_DEBUG(...)
Definition: SGIO.h:107
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
SGStringList< ST > get_features()
The class Features is the base class of all feature objects.
Definition: Features.h:68
static T min(T a, T b)
Definition: Math.h:157
Binary Labels for binary classification.
Definition: BinaryLabels.h:37
static void swap(T &a, T &b)
Definition: Math.h:438
#define SG_UNSTABLE(func,...)
Definition: SGIO.h:132
static int32_t pow(bool x, int32_t n)
Definition: Math.h:535
virtual EFeatureType get_feature_type() const =0

SHOGUN Machine Learning Toolbox - Documentation