SHOGUN  4.1.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
WDSVMOcas.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2007-2008 Vojtech Franc
8  * Written (W) 2007-2009 Soeren Sonnenburg
9  * Copyright (C) 2007-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
12 #include <shogun/labels/Labels.h>
15 #include <shogun/lib/Time.h>
16 #include <shogun/base/Parallel.h>
17 #include <shogun/machine/Machine.h>
18 #include <shogun/lib/external/libocas.h>
22 #include <shogun/labels/Labels.h>
24 
25 using namespace shogun;
26 
27 #ifndef DOXYGEN_SHOULD_SKIP_THIS
// Thread argument bundle for the parallel output computation
// (compute_output -> compute_output_helper). One instance per worker; all
// workers share the same out/val/output buffers and each writes only its
// disjoint example range [start, end).
28 struct wdocas_thread_params_output
29 {
30  float32_t* out; // shared float32 accumulator, one entry per example
31  int32_t* val; // shared per-example scratch for WD substring indices
32  float64_t* output; // final float64 outputs handed back to the solver
33  CWDSVMOcas* wdocas; // owning machine (weights, features, labels)
34  int32_t start; // first example index (inclusive) for this worker
35  int32_t end; // one past the last example index for this worker
36 };
37 
// Thread argument bundle for the parallel cutting-plane accumulation
// (add_new_cut -> add_new_cut_helper). Workers share new_a/new_cut and
// split the sequence positions [start, end) between them.
38 struct wdocas_thread_params_add
39 {
40  CWDSVMOcas* wdocas; // owning machine (weights, features, labels)
41  float32_t* new_a; // shared accumulator for the new cutting plane
42  uint32_t* new_cut; // indices of examples entering the cut
43  int32_t start; // first string position (inclusive) for this worker
44  int32_t end; // one past the last string position for this worker
45  uint32_t cut_length; // number of entries in new_cut
46 };
47 #endif // DOXYGEN_SHOULD_SKIP_THIS
48 
// Default constructor (the signature on original line 49 was dropped by
// this extraction). Initializes an untrained model: no bias, buffer for
// 3000 cutting planes, C=1 for both classes, epsilon=1e-3, OCAS method,
// WD degree 6 with weights truncated from a degree-40 weighting.
50 : CMachine(), use_bias(false), bufsize(3000), C1(1), C2(1),
51  epsilon(1e-3), method(SVM_OCAS)
52 {
53  SG_UNSTABLE("CWDSVMOcas::CWDSVMOcas()", "\n")
54 
55  w=NULL;
56  old_w=NULL;
57  features=NULL;
58  degree=6;
59  from_degree=40;
60  wd_weights=NULL;
61  w_offsets=NULL;
// NOTE(review): original line 62 is missing from this listing -- likely
// one more member initialization; confirm against the repository.
63 }
64 
// Construct with an explicit solver variant (`type`); all other defaults
// match the default constructor.
65 CWDSVMOcas::CWDSVMOcas(E_SVM_TYPE type)
66 : CMachine(), use_bias(false), bufsize(3000), C1(1), C2(1),
67  epsilon(1e-3), method(type)
68 {
69  w=NULL;
70  old_w=NULL;
71  features=NULL;
72  degree=6;
73  from_degree=40;
74  wd_weights=NULL;
75  w_offsets=NULL;
// NOTE(review): original line 76 is missing from this listing; confirm
// whether an extra initialization belongs here.
77 }
78 
// Convenience constructor: regularization C (applied to both classes),
// WD degree d truncated from from_d, plus training features and labels.
// (The opening signature line, original line 79, is missing from this
// extraction.)
80  float64_t C, int32_t d, int32_t from_d, CStringFeatures<uint8_t>* traindat,
81  CLabels* trainlab)
82 : CMachine(), use_bias(false), bufsize(3000), C1(C), C2(C), epsilon(1e-3),
83  degree(d), from_degree(from_d)
84 {
85  w=NULL;
86  old_w=NULL;
87  method=SVM_OCAS;
88  features=traindat;
89  set_labels(trainlab);
90  wd_weights=NULL;
91  w_offsets=NULL;
// NOTE(review): original line 92 is missing from this listing.
93 }
94 
95 
// Destructor (signature on original line 96 missing from this extraction).
// Body is empty here; buffers such as w/old_w appear to be freed and
// reallocated inside train_machine -- TODO confirm cleanup is complete.
97 {
98 }
99 
// apply_binary (signature line, original 100, missing from this
// extraction): computes raw outputs for all examples and wraps them in
// binary (+1/-1) labels.
101 {
102  SGVector<float64_t> outputs = apply_get_outputs(data);
103  return new CBinaryLabels(outputs);
104 }
105 
// apply_regression (signature line, original 106, missing from this
// extraction): same raw outputs as apply_binary, returned as real-valued
// labels instead.
107 {
108  SGVector<float64_t> outputs = apply_get_outputs(data);
109  return new CRegressionLabels(outputs);
110 }
111 
// apply_get_outputs (signature line, original 112, missing from this
// extraction): validates that `data`, if supplied, is byte string
// features, refreshes the WD weights, and scores every stored example via
// apply_one(). Returns an empty vector when no features are set.
113 {
114  if (data)
115  {
116  if (data->get_feature_class() != C_STRING ||
117  data->get_feature_type() != F_BYTE)
118  {
119  SG_ERROR("Features not of class string type byte\n")
120  }
121 
// NOTE(review): original line 122 is missing here -- presumably the call
// installing `data` as the active features; confirm upstream.
123  }
125 
126  set_wd_weights();
// NOTE(review): original lines 124 and 127 were dropped by the extraction.
128 
129  SGVector<float64_t> outputs;
130  if (features)
131  {
132  int32_t num=features->get_num_vectors();
133  ASSERT(num>0)
134 
135  outputs = SGVector<float64_t>(num);
136 
137  for (int32_t i=0; i<num; i++)
138  outputs[i] = apply_one(i);
139  }
140 
141  return outputs;
142 }
143 
// set_wd_weights (signature line, original 144, missing from this
// extraction): (re)allocates the per-degree WD kernel weights and the
// per-degree offsets into the weight vector, and returns the accumulated
// weight-vector dimension for a single sequence position.
145 {
146  ASSERT(degree>0 && degree<=8)
147  SG_FREE(wd_weights);
148  wd_weights=SG_MALLOC(float32_t, degree);
149  SG_FREE(w_offsets);
150  w_offsets=SG_MALLOC(int32_t, degree);
151  int32_t w_dim_single_c=0;
152 
153  for (int32_t i=0; i<degree; i++)
154  {
// NOTE(review): original line 155 is missing from this listing -- it must
// initialize w_offsets[i] (otherwise the sum below reads uninitialized
// memory); confirm against the repository.
156  wd_weights[i]=sqrt(2.0*(from_degree-i)/(from_degree*(from_degree+1)));
157  w_dim_single_c+=w_offsets[i];
158  }
159  return w_dim_single_c;
160 }
161 
// train_machine (signature line, original 162, missing from this
// extraction): sets up WD weights, the flat weight vector, cutting-plane
// buffers and labels, then runs the external svm_ocas_solver with the
// static callbacks defined in this file, logging its timing statistics.
// Several original lines (166-167, 175, 178, 183, 187, 190, 192, 194,
// 214, 223 and 231-236) were dropped by this extraction.
163 {
164  SG_INFO("C=%f, epsilon=%f, bufsize=%d\n", get_C1(), get_epsilon(), bufsize)
165 
168  if (data)
169  {
170  if (data->get_feature_class() != C_STRING ||
171  data->get_feature_type() != F_BYTE)
172  {
173  SG_ERROR("Features not of class string type byte\n")
174  }
// NOTE(review): original line 175 missing -- presumably installs `data`
// as the active features; confirm upstream.
176  }
177 
// training requires byte strings over the RAWDNA alphabet
179  CAlphabet* alphabet=get_features()->get_alphabet();
180  ASSERT(alphabet && alphabet->get_alphabet()==RAWDNA)
181 
182  alphabet_size=alphabet->get_num_symbols();
184  SGVector<float64_t> labvec=((CBinaryLabels*) m_labels)->get_labels();
185  lab=labvec.vector;
186 
188  //CMath::display_vector(wd_weights, degree, "wd_weights");
189  SG_DEBUG("w_dim_single_char=%d\n", w_dim_single_char)
191  SG_DEBUG("cutting plane has %d dims\n", w_dim)
193 
195  SG_INFO("num_vec: %d num_lab: %d\n", num_vec, labvec.vlen)
196  ASSERT(num_vec==labvec.vlen)
197  ASSERT(num_vec>0)
198 
// fresh zeroed weight vectors for current and previous iterate
199  SG_FREE(w);
200  w=SG_MALLOC(float32_t, w_dim);
201  memset(w, 0, w_dim*sizeof(float32_t));
202 
203  SG_FREE(old_w);
204  old_w=SG_MALLOC(float32_t, w_dim);
205  memset(old_w, 0, w_dim*sizeof(float32_t));
206  bias=0;
207  old_bias=0;
208 
// storage for up to bufsize cutting planes and their bias components
209  cuts=SG_MALLOC(float32_t*, bufsize);
210  memset(cuts, 0, sizeof(*cuts)*bufsize);
211  cp_bias=SG_MALLOC(float64_t, bufsize);
212  memset(cp_bias, 0, sizeof(float64_t)*bufsize);
213 
215  /*float64_t* tmp = SG_MALLOC(float64_t, num_vec);
216  float64_t start=CTime::get_curtime();
217  CMath::random_vector(w, w_dim, (float32_t) 0, (float32_t) 1000);
218  compute_output(tmp, this);
219  start=CTime::get_curtime()-start;
220  SG_PRINT("timing:%f\n", start)
221  SG_FREE(tmp);
222  exit(1);*/
// Method==1 selects OCAS inside libocas; 0 is the plain cutting-plane mode
224  float64_t TolAbs=0;
225  float64_t QPBound=0;
226  uint8_t Method=0;
227  if (method == SVM_OCAS)
228  Method = 1;
229  ocas_return_value_T result = svm_ocas_solver( get_C1(), num_vec, get_epsilon(),
230  TolAbs, QPBound, get_max_train_time(), bufsize, Method,
// NOTE(review): original lines 231-236 (the callback-function arguments,
// e.g. compute_W/update_W/add_new_cut/compute_output/sort) are missing
// from this listing; confirm the exact argument order upstream.
237  this);
238 
239  SG_INFO("Ocas Converged after %d iterations\n"
240  "==================================\n"
241  "timing statistics:\n"
242  "output_time: %f s\n"
243  "sort_time: %f s\n"
244  "add_time: %f s\n"
245  "w_time: %f s\n"
246  "solver_time %f s\n"
247  "ocas_time %f s\n\n", result.nIter, result.output_time, result.sort_time,
248  result.add_time, result.w_time, result.qp_solver_time, result.ocas_time);
249 
// release cutting-plane storage; lab only borrowed labvec.vector
250  for (int32_t i=bufsize-1; i>=0; i--)
251  SG_FREE(cuts[i]);
252  SG_FREE(cuts);
253 
254  lab=NULL;
255  SG_UNREF(alphabet);
256 
257  return true;
258 }
259 
260 /*----------------------------------------------------------------------------------
261  sq_norm_W = sparse_update_W( t ) does the following:
262 
263  W = oldW*(1-t) + t*W;
264  sq_norm_W = W'*W;
265 
266  ---------------------------------------------------------------------------------*/
// update_W callback (signature line, original 267, missing from this
// extraction): line-search update W = (1-t)*oldW + t*W, returning the
// squared norm of the new W including the bias term.
// NOTE(review): original line 275 is missing -- `old_bias` used below is
// presumably a local copy of o->old_bias loaded there; confirm upstream.
268 {
269  float64_t sq_norm_W = 0;
270  CWDSVMOcas* o = (CWDSVMOcas*) ptr;
271  uint32_t nDim = (uint32_t) o->w_dim;
272  float32_t* W=o->w;
273  float32_t* oldW=o->old_w;
274  float64_t bias=o->bias;
276 
// blend old and new weights in place and accumulate ||W||^2
277  for(uint32_t j=0; j <nDim; j++)
278  {
279  W[j] = oldW[j]*(1-t) + t*W[j];
280  sq_norm_W += W[j]*W[j];
281  }
282 
283  bias=old_bias*(1-t) + t*bias;
284  sq_norm_W += CMath::sq(bias);
285 
// write blended bias back; old_bias is stored back unchanged
286  o->bias=bias;
287  o->old_bias=old_bias;
288 
289  return( sq_norm_W );
290 }
291 
292 /*----------------------------------------------------------------------------------
293  sparse_add_new_cut( new_col_H, new_cut, cut_length, nSel ) does the following:
294 
295  new_a = sum(data_X(:,find(new_cut ~=0 )),2);
296  new_col_H = [sparse_A(:,1:nSel)'*new_a ; new_a'*new_a];
297  sparse_A(:,nSel+1) = new_a;
298 
299  ---------------------------------------------------------------------------------*/
// add_new_cut_helper (signature line, original 300, missing from this
// extraction): worker routine accumulating the new cutting plane new_a
// over sequence positions [start, end): for each position and each WD
// order k it bins the k-mers of the examples listed in new_cut and adds
// the label-weighted WD weight into the matching weight-vector slot.
// NOTE(review): original lines 313, 315 and 316 are missing -- they must
// load the locals `wd_weights`, `f` (the string features) and
// `normalization_const` used below; confirm upstream.
301 {
302  wdocas_thread_params_add* p = (wdocas_thread_params_add*) ptr;
303  CWDSVMOcas* o = p->wdocas;
304  int32_t start = p->start;
305  int32_t end = p->end;
306  int32_t string_length = o->string_length;
307  //uint32_t nDim=(uint32_t) o->w_dim;
308  uint32_t cut_length=p->cut_length;
309  uint32_t* new_cut=p->new_cut;
310  int32_t* w_offsets = o->w_offsets;
311  float64_t* y = o->lab;
312  int32_t alphabet_size = o->alphabet_size;
314  int32_t degree = o->degree;
317 
318  // temporary vector
319  float32_t* new_a = p->new_a;
320  //float32_t* new_a = SG_MALLOC(float32_t, nDim);
321  //memset(new_a, 0, sizeof(float32_t)*nDim);
322 
// val[i] carries the running k-mer index of example new_cut[i]
323  int32_t* val=SG_MALLOC(int32_t, cut_length);
324  for (int32_t j=start; j<end; j++)
325  {
326  int32_t offs=o->w_dim_single_char*j;
327  memset(val,0,sizeof(int32_t)*cut_length);
328  int32_t lim=CMath::min(degree, string_length-j);
329  int32_t len;
330 
331  for (int32_t k=0; k<lim; k++)
332  {
333  bool free_vec;
334  uint8_t* vec = f->get_feature_vector(j+k, len, free_vec);
335  float32_t wd = wd_weights[k]/normalization_const;
336 
337  for(uint32_t i=0; i < cut_length; i++)
338  {
// extend the k-mer by one symbol and bump the matching weight slot
339  val[i]=val[i]*alphabet_size + vec[new_cut[i]];
340  new_a[offs+val[i]]+=wd * y[new_cut[i]];
341  }
342  offs+=w_offsets[k];
343  f->free_feature_vector(vec, j+k, free_vec);
344  }
345  }
346 
347  //p->new_a=new_a;
348  SG_FREE(val);
349  return NULL;
350 }
351 
// add_new_cut callback (the opening signature line, original 352, is
// missing from this extraction): builds the new cutting plane new_a in
// parallel over sequence positions, appends it as column nSel of the
// cutting-plane set, and fills new_col_H with its inner products against
// all stored planes (bias terms included). Worker threads all accumulate
// into the shared new_a; each covers a disjoint position range.
353  float64_t *new_col_H, uint32_t *new_cut, uint32_t cut_length,
354  uint32_t nSel, void* ptr)
355 {
356  CWDSVMOcas* o = (CWDSVMOcas*) ptr;
357  uint32_t i;
358  float64_t* c_bias = o->cp_bias;
359  uint32_t nDim=(uint32_t) o->w_dim;
360  float32_t** cuts=o->cuts;
361  float32_t* new_a=SG_MALLOC(float32_t, nDim);
362  memset(new_a, 0, sizeof(float32_t)*nDim);
363 #ifdef HAVE_PTHREAD
364 
365  wdocas_thread_params_add* params_add=SG_MALLOC(wdocas_thread_params_add, o->parallel->get_num_threads());
366  pthread_t* threads=SG_MALLOC(pthread_t, o->parallel->get_num_threads());
367 
368  int32_t string_length = o->string_length;
369  int32_t t;
// the calling thread handles the final chunk itself, so spawn one fewer
370  int32_t nthreads=o->parallel->get_num_threads()-1;
371  int32_t step= string_length/o->parallel->get_num_threads();
372 
373  if (step<1)
374  {
375  nthreads=string_length-1;
376  step=1;
377  }
378 
379  for (t=0; t<nthreads; t++)
380  {
381  params_add[t].wdocas=o;
382  //params_add[t].new_a=NULL;
383  params_add[t].new_a=new_a;
384  params_add[t].new_cut=new_cut;
385  params_add[t].start = step*t;
386  params_add[t].end = step*(t+1);
387  params_add[t].cut_length = cut_length;
388 
389  if (pthread_create(&threads[t], NULL, &CWDSVMOcas::add_new_cut_helper, (void*)&params_add[t]) != 0)
390  {
// on failure, only join the threads actually started and take over
// the remaining range on this thread
391  nthreads=t;
392  SG_SWARNING("thread creation failed\n")
393  break;
394  }
395  }
396 
// remainder chunk [step*t, string_length) runs on the calling thread
397  params_add[t].wdocas=o;
398  //params_add[t].new_a=NULL;
399  params_add[t].new_a=new_a;
400  params_add[t].new_cut=new_cut;
401  params_add[t].start = step*t;
402  params_add[t].end = string_length;
403  params_add[t].cut_length = cut_length;
404  add_new_cut_helper(&params_add[t]);
405  //float32_t* new_a=params_add[t].new_a;
406 
407  for (t=0; t<nthreads; t++)
408  {
409  if (pthread_join(threads[t], NULL) != 0)
410  SG_SWARNING("pthread_join failed\n")
411 
412  //float32_t* a=params_add[t].new_a;
413  //for (i=0; i<nDim; i++)
414  // new_a[i]+=a[i];
415  //SG_FREE(a);
416  }
417  SG_FREE(threads);
418  SG_FREE(params_add);
419 #endif /* HAVE_PTHREAD */
// bias component of the new plane: sum of labels in the cut
420  for(i=0; i < cut_length; i++)
421  {
422  if (o->use_bias)
423  c_bias[nSel]+=o->lab[new_cut[i]];
424  }
425 
426  // insert new_a into the last column of sparse_A
427  for(i=0; i < nSel; i++)
428  new_col_H[i] = CMath::dot(new_a, cuts[i], nDim) + c_bias[nSel]*c_bias[i];
429  new_col_H[nSel] = CMath::dot(new_a, new_a, nDim) + CMath::sq(c_bias[nSel]);
430 
// ownership of new_a transfers to the cuts array (freed in train_machine)
431  cuts[nSel]=new_a;
432  //CMath::display_vector(new_col_H, nSel+1, "new_col_H");
433  //CMath::display_vector(cuts[nSel], nDim, "cut[nSel]");
434  //
435 
436  return 0;
437 }
438 
// libocas sort callback: sorts `vals` ascending while permuting `data`
// in lockstep; the zero return signals success to the solver.
439 int CWDSVMOcas::sort( float64_t* vals, float64_t* data, uint32_t size)
440 {
441  CMath::qsort_index(vals, data, size);
442  return 0;
443 }
444 
445 /*----------------------------------------------------------------------
446  sparse_compute_output( output ) does the following:
447 
448  output = data_X'*W;
449  ----------------------------------------------------------------------*/
// compute_output_helper (signature line, original 450, missing from this
// extraction): worker routine for compute_output. For each sequence
// position it accumulates, per example in [start, end), the WD-weighted
// contribution of every k-mer up to `degree`, then converts the float32
// accumulator into the label-scaled, bias-shifted float64 output.
// NOTE(review): original lines 460, 466 and 470 are missing -- they must
// load the locals `f` (string features), `wd_weights` and
// `normalization_const` used below; confirm upstream.
451 {
452  wdocas_thread_params_output* p = (wdocas_thread_params_output*) ptr;
453  CWDSVMOcas* o = p->wdocas;
454  int32_t start = p->start;
455  int32_t end = p->end;
456  float32_t* out = p->out;
457  float64_t* output = p->output;
458  int32_t* val = p->val;
459 
461 
462  int32_t degree = o->degree;
463  int32_t string_length = o->string_length;
464  int32_t alphabet_size = o->alphabet_size;
465  int32_t* w_offsets = o->w_offsets;
467  float32_t* w= o->w;
468 
469  float64_t* y = o->lab;
471 
472 
473  for (int32_t j=0; j<string_length; j++)
474  {
475  int32_t offs=o->w_dim_single_char*j;
// val[i] carries the running k-mer index of example i at position j
476  for (int32_t i=start ; i<end; i++)
477  val[i]=0;
478 
479  int32_t lim=CMath::min(degree, string_length-j);
480  int32_t len;
481 
482  for (int32_t k=0; k<lim; k++)
483  {
483  // NOTE: features are stored position-wise: vec[i] is symbol j+k of example i
484  bool free_vec;
485  uint8_t* vec=f->get_feature_vector(j+k, len, free_vec);
486  float32_t wd = wd_weights[k];
487 
488  for (int32_t i=start; i<end; i++) // quite fast 1.9s
489  {
490  val[i]=val[i]*alphabet_size + vec[i];
491  out[i]+=wd*w[offs+val[i]];
492  }
493 
494  /*for (int32_t i=0; i<nData/4; i++) // slowest 2s
495  {
496  uint32_t x=((uint32_t*) vec)[i];
497  int32_t ii=4*i;
498  val[ii]=val[ii]*alphabet_size + (x&255);
499  val[ii+1]=val[ii+1]*alphabet_size + ((x>>8)&255);
500  val[ii+2]=val[ii+2]*alphabet_size + ((x>>16)&255);
501  val[ii+3]=val[ii+3]*alphabet_size + (x>>24);
502  out[ii]+=wd*w[offs+val[ii]];
503  out[ii+1]+=wd*w[offs+val[ii+1]];
504  out[ii+2]+=wd*w[offs+val[ii+2]];
505  out[ii+3]+=wd*w[offs+val[ii+3]];
506  }*/
507 
508  /*for (int32_t i=0; i<nData>>3; i++) // fastest on 64bit: 1.5s
509  {
510  uint64_t x=((uint64_t*) vec)[i];
511  int32_t ii=i<<3;
512  val[ii]=val[ii]*alphabet_size + (x&255);
513  val[ii+1]=val[ii+1]*alphabet_size + ((x>>8)&255);
514  val[ii+2]=val[ii+2]*alphabet_size + ((x>>16)&255);
515  val[ii+3]=val[ii+3]*alphabet_size + ((x>>24)&255);
516  val[ii+4]=val[ii+4]*alphabet_size + ((x>>32)&255);
517  val[ii+5]=val[ii+5]*alphabet_size + ((x>>40)&255);
518  val[ii+6]=val[ii+6]*alphabet_size + ((x>>48)&255);
519  val[ii+7]=val[ii+7]*alphabet_size + (x>>56);
520  out[ii]+=wd*w[offs+val[ii]];
521  out[ii+1]+=wd*w[offs+val[ii+1]];
522  out[ii+2]+=wd*w[offs+val[ii+2]];
523  out[ii+3]+=wd*w[offs+val[ii+3]];
524  out[ii+4]+=wd*w[offs+val[ii+4]];
525  out[ii+5]+=wd*w[offs+val[ii+5]];
526  out[ii+6]+=wd*w[offs+val[ii+6]];
527  out[ii+7]+=wd*w[offs+val[ii+7]];
528  }*/
529  offs+=w_offsets[k];
530  f->free_feature_vector(vec, j+k, free_vec);
531  }
532  }
533 
// final output: label-scaled margin with bias, normalized
534  for (int32_t i=start; i<end; i++)
535  output[i]=y[i]*o->bias + out[i]*y[i]/normalization_const;
536 
537  //CMath::display_vector(o->w, o->w_dim, "w");
538  //CMath::display_vector(output, nData, "out");
539  return NULL;
540 }
541 
542 int CWDSVMOcas::compute_output( float64_t *output, void* ptr )
543 {
544 #ifdef HAVE_PTHREAD
545  CWDSVMOcas* o = (CWDSVMOcas*) ptr;
546  int32_t nData=o->num_vec;
547  wdocas_thread_params_output* params_output=SG_MALLOC(wdocas_thread_params_output, o->parallel->get_num_threads());
548  pthread_t* threads = SG_MALLOC(pthread_t, o->parallel->get_num_threads());
549 
550  float32_t* out=SG_MALLOC(float32_t, nData);
551  int32_t* val=SG_MALLOC(int32_t, nData);
552  memset(out, 0, sizeof(float32_t)*nData);
553 
554  int32_t t;
555  int32_t nthreads=o->parallel->get_num_threads()-1;
556  int32_t step= nData/o->parallel->get_num_threads();
557 
558  if (step<1)
559  {
560  nthreads=nData-1;
561  step=1;
562  }
563 
564  for (t=0; t<nthreads; t++)
565  {
566  params_output[t].wdocas=o;
567  params_output[t].output=output;
568  params_output[t].out=out;
569  params_output[t].val=val;
570  params_output[t].start = step*t;
571  params_output[t].end = step*(t+1);
572 
573  //SG_SPRINT("t=%d start=%d end=%d output=%p\n", t, params_output[t].start, params_output[t].end, params_output[t].output)
574  if (pthread_create(&threads[t], NULL, &CWDSVMOcas::compute_output_helper, (void*)&params_output[t]) != 0)
575  {
576  nthreads=t;
577  SG_SWARNING("thread creation failed\n")
578  break;
579  }
580  }
581 
582  params_output[t].wdocas=o;
583  params_output[t].output=output;
584  params_output[t].out=out;
585  params_output[t].val=val;
586  params_output[t].start = step*t;
587  params_output[t].end = nData;
588  compute_output_helper(&params_output[t]);
589  //SG_SPRINT("t=%d start=%d end=%d output=%p\n", t, params_output[t].start, params_output[t].end, params_output[t].output)
590 
591  for (t=0; t<nthreads; t++)
592  {
593  if (pthread_join(threads[t], NULL) != 0)
594  SG_SWARNING("pthread_join failed\n")
595  }
596  SG_FREE(threads);
597  SG_FREE(params_output);
598  SG_FREE(val);
599  SG_FREE(out);
600 #endif /* HAVE_PTHREAD */
601  return 0;
602 }
603 /*----------------------------------------------------------------------
604  sq_norm_W = compute_W( alpha, nSel ) does the following:
605 
606  oldW = W;
607  W = sparse_A(:,1:nSel)'*alpha;
608  sq_norm_W = W'*W;
609  dp_WoldW = W'*oldW';
610 
611  ----------------------------------------------------------------------*/
// compute_W callback (the opening signature line, original 612, is
// missing from this extraction): swaps W into old_w, rebuilds
// W = sum_i alpha[i]*cuts[i] (bias likewise from cp_bias), and reports
// ||W||^2 and <W, oldW> back to the solver.
// NOTE(review): original line 624 is missing -- `old_bias` used below is
// presumably a local copy of o->old_bias loaded there; confirm upstream.
613  float64_t *sq_norm_W, float64_t *dp_WoldW, float64_t *alpha, uint32_t nSel,
614  void* ptr)
615 {
616  CWDSVMOcas* o = (CWDSVMOcas*) ptr;
617  uint32_t nDim= (uint32_t) o->w_dim;
// previous W becomes old_w; the freed slot is zeroed and rebuilt below
618  CMath::swap(o->w, o->old_w);
619  float32_t* W=o->w;
620  float32_t* oldW=o->old_w;
621  float32_t** cuts=o->cuts;
622  memset(W, 0, sizeof(float32_t)*nDim);
623  float64_t* c_bias = o->cp_bias;
625  float64_t bias=0;
626 
627  for (uint32_t i=0; i<nSel; i++)
628  {
629  if (alpha[i] > 0)
630  SGVector<float32_t>::vec1_plus_scalar_times_vec2(W, (float32_t) alpha[i], cuts[i], nDim);
631 
632  bias += c_bias[i]*alpha[i];
633  }
634 
635  *sq_norm_W = CMath::dot(W,W, nDim) +CMath::sq(bias);
636  *dp_WoldW = CMath::dot(W,oldW, nDim) + bias*old_bias;;
637  //SG_PRINT("nSel=%d sq_norm_W=%f dp_WoldW=%f\n", nSel, *sq_norm_W, *dp_WoldW)
638 
639  o->bias = bias;
640  o->old_bias = old_bias;
641 }
virtual int32_t get_max_vector_length()
SGVector< ST > get_feature_vector(int32_t num)
float32_t * w
Definition: WDSVMOcas.h:363
#define SG_INFO(...)
Definition: SGIO.h:118
virtual ELabelType get_label_type() const =0
binary labels +1/-1
Definition: LabelTypes.h:18
Real Labels are real-valued labels.
class WDSVMOcas
Definition: WDSVMOcas.h:28
float32_t * wd_weights
Definition: WDSVMOcas.h:341
RAWDNA - letters 0,1,2,3.
Definition: Alphabet.h:29
int32_t get_num_threads() const
Definition: Parallel.cpp:64
float64_t * cp_bias
Definition: WDSVMOcas.h:372
static int sort(float64_t *vals, float64_t *data, uint32_t size)
Definition: WDSVMOcas.cpp:439
The class Labels models labels, i.e. class assignments of objects.
Definition: Labels.h:43
static int add_new_cut(float64_t *new_col_H, uint32_t *new_cut, uint32_t cut_length, uint32_t nSel, void *ptr)
Definition: WDSVMOcas.cpp:352
static void qsort_index(T1 *output, T2 *index, uint32_t size)
Definition: Math.h:2202
virtual CRegressionLabels * apply_regression(CFeatures *data=NULL)
Definition: WDSVMOcas.cpp:106
static void * add_new_cut_helper(void *ptr)
Definition: WDSVMOcas.cpp:300
#define SG_SWARNING(...)
Definition: SGIO.h:178
float64_t * lab
Definition: WDSVMOcas.h:367
EAlphabet get_alphabet() const
Definition: Alphabet.h:130
virtual int32_t get_num_vectors() const
static T sq(T x)
Definition: Math.h:450
CLabels * m_labels
Definition: Machine.h:361
E_SVM_TYPE method
Definition: WDSVMOcas.h:334
#define SG_ERROR(...)
Definition: SGIO.h:129
int32_t set_wd_weights()
Definition: WDSVMOcas.cpp:144
static int compute_output(float64_t *output, void *ptr)
Definition: WDSVMOcas.cpp:542
The class Alphabet implements an alphabet and alphabet utility functions.
Definition: Alphabet.h:91
int32_t alphabet_size
Definition: WDSVMOcas.h:347
void free_feature_vector(ST *feat_vec, int32_t num, bool dofree)
float64_t get_C1()
Definition: WDSVMOcas.h:74
Parallel * parallel
Definition: SGObject.h:372
int32_t from_degree
Definition: WDSVMOcas.h:339
virtual ~CWDSVMOcas()
Definition: WDSVMOcas.cpp:96
A generic learning machine interface.
Definition: Machine.h:143
SGVector< float64_t > apply_get_outputs(CFeatures *data)
Definition: WDSVMOcas.cpp:112
static float64_t update_W(float64_t t, void *ptr)
Definition: WDSVMOcas.cpp:267
virtual CBinaryLabels * apply_binary(CFeatures *data=NULL)
Definition: WDSVMOcas.cpp:100
static const float64_t epsilon
Definition: libbmrm.cpp:25
index_t vlen
Definition: SGVector.h:494
CStringFeatures< uint8_t > * get_features()
Definition: WDSVMOcas.h:109
#define ASSERT(x)
Definition: SGIO.h:201
static void * compute_output_helper(void *ptr)
Definition: WDSVMOcas.cpp:450
CStringFeatures< uint8_t > * features
Definition: WDSVMOcas.h:322
float64_t normalization_const
Definition: WDSVMOcas.h:350
float64_t bias
Definition: WDSVMOcas.h:353
double float64_t
Definition: common.h:50
float64_t old_bias
Definition: WDSVMOcas.h:355
void set_normalization_const()
Definition: WDSVMOcas.h:206
int32_t get_num_symbols() const
Definition: Alphabet.h:139
float64_t get_max_train_time()
Definition: Machine.cpp:87
virtual bool train_machine(CFeatures *data=NULL)
Definition: WDSVMOcas.cpp:162
static void compute_W(float64_t *sq_norm_W, float64_t *dp_WoldW, float64_t *alpha, uint32_t nSel, void *ptr)
Definition: WDSVMOcas.cpp:612
virtual EFeatureClass get_feature_class() const =0
virtual float64_t apply_one(int32_t num)
Definition: WDSVMOcas.h:177
static float64_t dot(const bool *v1, const bool *v2, int32_t n)
Compute dot product between v1 and v2 (blas optimized)
Definition: Math.h:627
static void vec1_plus_scalar_times_vec2(T *vec1, const T scalar, const T *vec2, int32_t n)
x=x+alpha*y
Definition: SGVector.cpp:531
float float32_t
Definition: common.h:49
float32_t ** cuts
Definition: WDSVMOcas.h:370
#define SG_UNREF(x)
Definition: SGObject.h:52
#define SG_DEBUG(...)
Definition: SGIO.h:107
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
The class Features is the base class of all feature objects.
Definition: Features.h:68
int32_t * w_offsets
Definition: WDSVMOcas.h:357
static T min(T a, T b)
Definition: Math.h:157
int32_t string_length
Definition: WDSVMOcas.h:345
Binary Labels for binary classification.
Definition: BinaryLabels.h:37
static void swap(T &a, T &b)
Definition: Math.h:438
float32_t * old_w
Definition: WDSVMOcas.h:365
static void print(ocas_return_value_T value)
Definition: WDSVMOcas.h:300
int32_t w_dim_single_char
Definition: WDSVMOcas.h:361
float64_t get_epsilon()
Definition: WDSVMOcas.h:92
virtual void set_labels(CLabels *lab)
Definition: Machine.cpp:65
#define SG_UNSTABLE(func,...)
Definition: SGIO.h:132
static int32_t pow(bool x, int32_t n)
Definition: Math.h:535
virtual EFeatureType get_feature_type() const =0
void set_features(CStringFeatures< uint8_t > *feat)
Definition: WDSVMOcas.h:98

SHOGUN Machine Learning Toolbox - Documentation