SHOGUN v3.0.0
LinearTimeMMD.cpp

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2012-2013 Heiko Strathmann
 */

#include <shogun/statistics/LinearTimeMMD.h>
#include <shogun/features/streaming/StreamingFeatures.h>
#include <shogun/kernel/CombinedKernel.h>
#include <shogun/mathematics/Statistics.h>
#include <shogun/mathematics/Math.h>

#include <shogun/lib/List.h>

#include <shogun/lib/external/libqp.h>

using namespace shogun;

CLinearTimeMMD::CLinearTimeMMD() : CKernelTwoSampleTestStatistic()
{
    init();
}

CLinearTimeMMD::CLinearTimeMMD(CKernel* kernel, CStreamingFeatures* p,
        CStreamingFeatures* q, index_t m, index_t blocksize) :
        CKernelTwoSampleTestStatistic(kernel, NULL, m)
{
    init();

    m_streaming_p=p;
    SG_REF(m_streaming_p);

    m_streaming_q=q;
    SG_REF(m_streaming_q);

    m_blocksize=blocksize;
}

CLinearTimeMMD::~CLinearTimeMMD()
{
    SG_UNREF(m_streaming_p);
    SG_UNREF(m_streaming_q);

    /* m_kernel is SG_UNREFed in base destructor */
}

void CLinearTimeMMD::init()
{
    SG_ADD((CSGObject**)&m_streaming_p, "streaming_p", "Streaming features p",
            MS_NOT_AVAILABLE);
    SG_ADD((CSGObject**)&m_streaming_q, "streaming_q", "Streaming features q",
            MS_NOT_AVAILABLE);
    SG_ADD(&m_blocksize, "blocksize", "Number of elements processed at once",
            MS_NOT_AVAILABLE);
    SG_ADD(&m_simulate_h0, "simulate_h0", "Whether p and q are mixed",
            MS_NOT_AVAILABLE);

    m_streaming_p=NULL;
    m_streaming_q=NULL;
    m_blocksize=10000;
    m_simulate_h0=false;
}

void CLinearTimeMMD::compute_statistic_and_variance(
        SGVector<float64_t>& statistic, SGVector<float64_t>& variance,
        bool multiple_kernels)
{
    SG_DEBUG("entering %s::compute_statistic_and_variance()\n", get_name())

    REQUIRE(m_streaming_p, "%s::compute_statistic_and_variance: streaming "
            "features p required!\n", get_name());
    REQUIRE(m_streaming_q, "%s::compute_statistic_and_variance: streaming "
            "features q required!\n", get_name());

    REQUIRE(m_kernel, "%s::compute_statistic_and_variance: kernel needed!\n",
            get_name());

    /* make sure the multiple_kernels flag is used only with a combined kernel */
    REQUIRE(!multiple_kernels || m_kernel->get_kernel_type()==K_COMBINED,
            "%s::compute_statistic_and_variance: multiple kernels specified, "
            "but underlying kernel is not of type K_COMBINED\n", get_name());

    /* m is the number of samples from each distribution, m_2 is half of it,
     * using names from the JMLR paper (see class documentation) */
    index_t m_2=m_m/2;
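
    /* For orientation, a sketch of the estimator that is accumulated below
     * (following the linear-time MMD of the JMLR paper referenced above; the
     * notation here is illustrative, not quoted from that paper):
     *
     *   h((x_1,y_1),(x_2,y_2)) = k(x_1,x_2)+k(y_1,y_2)-k(x_1,y_2)-k(x_2,y_1)
     *   MMD_l^2 = (1/m_2) * sum over i=1..m_2 of h-terms on disjoint pairs
     *
     * with x drawn from p and y drawn from q, i.e. the running mean of the
     * pp+qq-pq-qp terms that the block loop below computes. */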

    SG_DEBUG("m_m=%d\n", m_m)

    /* find out whether single or multiple kernels (cast is safe, check above) */
    index_t num_kernels=1;
    if (multiple_kernels)
    {
        num_kernels=((CCombinedKernel*)m_kernel)->get_num_subkernels();
        SG_DEBUG("computing MMD and variance for %d sub-kernels\n",
                num_kernels);
    }

    /* allocate memory for results if vectors are empty */
    if (!statistic.vector)
        statistic=SGVector<float64_t>(num_kernels);

    if (!variance.vector)
        variance=SGVector<float64_t>(num_kernels);

    /* ensure right dimensions */
    REQUIRE(statistic.vlen==num_kernels, "%s::compute_statistic_and_variance: "
            "statistic vector size (%d) does not match number of kernels (%d)\n",
            get_name(), statistic.vlen, num_kernels);

    REQUIRE(variance.vlen==num_kernels, "%s::compute_statistic_and_variance: "
            "variance vector size (%d) does not match number of kernels (%d)\n",
            get_name(), variance.vlen, num_kernels);

    /* temp variables in the algorithm */
    float64_t current;
    float64_t delta;

    /* initialise statistic and variance since they are cumulative */
    statistic.zero();
    variance.zero();

    /* needed for online mean and variance */
    SGVector<index_t> term_counters(num_kernels);
    term_counters.set_const(1);

    /* number of examples processed so far */
    index_t num_examples_processed=0;
    while (num_examples_processed<m_2)
    {
        /* number of examples to look at in this iteration */
        index_t num_this_run=CMath::min(m_blocksize,
                CMath::max(0, m_2-num_examples_processed));
        SG_DEBUG("processing %d more examples. %d so far processed. Blocksize "
                "is %d\n", num_this_run, num_examples_processed, m_blocksize);

        /* stream data from both distributions */
        CFeatures* p1=m_streaming_p->get_streamed_features(num_this_run);
        CFeatures* p2=m_streaming_p->get_streamed_features(num_this_run);
        CFeatures* q1=m_streaming_q->get_streamed_features(num_this_run);
        CFeatures* q2=m_streaming_q->get_streamed_features(num_this_run);

        /* check whether h0 should be simulated and permute if so */
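        /* Rationale (a sketch, not taken verbatim from the original sources):
         * under H0 the two distributions are identical, so samples from both
         * streams are exchangeable. Merging the freshly streamed blocks,
         * permuting the merged order and splitting it back into blocks of the
         * original size therefore produces data that behaves as if p and q
         * were mixed, which is exactly what simulating the null distribution
         * requires. */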
        if (m_simulate_h0)
        {
            /* create merged copy of all feature instances to permute */
            CList* list=new CList();
            list->append_element(p2);
            list->append_element(q1);
            list->append_element(q2);
            CFeatures* merged=p1->create_merged_copy(list);
            SG_UNREF(list);

            /* permute */
            SGVector<index_t> inds(merged->get_num_vectors());
            inds.range_fill();
            inds.permute();
            merged->add_subset(inds);

            /* copy back, replacing old features */
            SG_UNREF(p1);
            SG_UNREF(p2);
            SG_UNREF(q1);
            SG_UNREF(q2);

            SGVector<index_t> copy(num_this_run);
            copy.range_fill();
            p1=merged->copy_subset(copy);
            copy.add(num_this_run);
            p2=merged->copy_subset(copy);
            copy.add(num_this_run);
            q1=merged->copy_subset(copy);
            copy.add(num_this_run);
            q2=merged->copy_subset(copy);

            /* clean up; note that copy_subset does an SG_REF */
            SG_UNREF(merged);
        }
        else
        {
            /* reference produced features (only needed if copy_subset was not used) */
            SG_REF(p1);
            SG_REF(p2);
            SG_REF(q1);
            SG_REF(q2);
        }

        /* if multiple kernels are used, compute all of them on the streamed
         * data; if the multiple_kernels flag is false, the following loop is
         * executed only once */
        CKernel* kernel=m_kernel;
        if (multiple_kernels)
        {
            SG_DEBUG("using multiple kernels\n");
        }

        /* iterate through all kernels for this data */
        for (index_t i=0; i<num_kernels; ++i)
        {
            /* if multiple kernels should be computed, set next kernel */
            if (multiple_kernels)
            {
                kernel=((CCombinedKernel*)m_kernel)->get_kernel(i);
            }

            /* compute kernel matrix diagonals */
            kernel->init(p1, p2);
            SGVector<float64_t> pp=kernel->get_kernel_diagonal();

            kernel->init(q1, q2);
            SGVector<float64_t> qq=kernel->get_kernel_diagonal();

            kernel->init(p1, q2);
            SGVector<float64_t> pq=kernel->get_kernel_diagonal();

            kernel->init(q1, p2);
            SGVector<float64_t> qp=kernel->get_kernel_diagonal();

            /* single variances for all kernels. Update mean and variance
             * using Knuth's online variance algorithm,
             * c.f. for example Wikipedia */
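            /* For reference, the update below is the standard Welford/Knuth
             * recurrence (n denotes the current term count):
             *
             *   delta  = x_n - mean_{n-1}
             *   mean_n = mean_{n-1} + delta/n
             *   M2_n   = M2_{n-1} + delta*(x_n - mean_n)
             *
             * so after the loop, statistic[i] is the mean of the h-terms and
             * variance[i] holds M2, the sum of squared deviations, which is
             * turned into a variance estimate further below. */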
            for (index_t j=0; j<num_this_run; ++j)
            {
                /* compute sum of current h terms for current kernel */
                current=pp[j]+qq[j]-pq[j]-qp[j];

                /* D. Knuth's online variance algorithm for current kernel */
                delta=current-statistic[i];
                statistic[i]+=delta/term_counters[i]++;
                variance[i]+=delta*(current-statistic[i]);

                SG_DEBUG("burst: current=%f, delta=%f, statistic=%f, "
                        "variance=%f, kernel_idx=%d\n", current, delta,
                        statistic[i], variance[i], i);
            }

            if (multiple_kernels)
            {
                SG_UNREF(kernel);
            }
        }

        /* clean up streamed data */
        SG_UNREF(p1);
        SG_UNREF(p2);
        SG_UNREF(q1);
        SG_UNREF(q2);

        /* add number of processed examples for this run */
        num_examples_processed+=num_this_run;
    }
    SG_DEBUG("Done computing statistic, processed 2*%d examples.\n",
            num_examples_processed);

    /* the mean of all h-terms is the linear time MMD; display it for all
     * kernels in debug mode */
    if (io->get_loglevel()==MSG_DEBUG || io->get_loglevel()==MSG_GCDEBUG)
        statistic.display_vector("statistics");

    /* the variance of the h-terms can be computed using their mean (statistic).
     * Note that it needs to be divided by m_2 in order to get the variance of
     * the null-distribution */
    for (index_t i=0; i<num_kernels; ++i)
        variance[i]=variance[i]/(m_2-1)/m_2;
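
    /* Dividing the accumulated sum of squared deviations by (m_2-1) gives the
     * unbiased sample variance of the h-terms; dividing once more by m_2
     * gives the variance of their mean, i.e. of the linear time MMD estimate
     * itself, which is what the MMD1_GAUSSIAN null approximation uses. */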

    if (io->get_loglevel()==MSG_DEBUG || io->get_loglevel()==MSG_GCDEBUG)
        variance.display_vector("variances");

    SG_DEBUG("leaving %s::compute_statistic_and_variance()\n", get_name())
}

void CLinearTimeMMD::compute_statistic_and_Q(
        SGVector<float64_t>& statistic, SGMatrix<float64_t>& Q)
{
    SG_DEBUG("entering %s::compute_statistic_and_Q()\n", get_name())

    REQUIRE(m_streaming_p, "%s::compute_statistic_and_Q: streaming "
            "features p required!\n", get_name());
    REQUIRE(m_streaming_q, "%s::compute_statistic_and_Q: streaming "
            "features q required!\n", get_name());

    REQUIRE(m_kernel, "%s::compute_statistic_and_Q: kernel needed!\n",
            get_name());

    /* this method is only possible with a combined kernel */
    REQUIRE(m_kernel->get_kernel_type()==K_COMBINED,
            "%s::compute_statistic_and_Q: underlying kernel is not of "
            "type K_COMBINED\n", get_name());

    /* cast combined kernel */
    CCombinedKernel* combined=(CCombinedKernel*)m_kernel;

    /* m is the number of samples from each distribution, m_4 is a quarter of it */
    REQUIRE(m_m>=4, "%s::compute_statistic_and_Q: Need at least m>=4\n",
            get_name());
    index_t m_4=m_m/4;

    SG_DEBUG("m_m=%d\n", m_m)

    /* find out the number of kernels (cast is safe, check above) */
    index_t num_kernels=combined->get_num_subkernels();
    REQUIRE(num_kernels>0, "%s::compute_statistic_and_Q: At least one kernel "
            "is needed\n", get_name());

    /* allocate memory for results if vectors are empty */
    if (!statistic.vector)
        statistic=SGVector<float64_t>(num_kernels);

    if (!Q.matrix)
        Q=SGMatrix<float64_t>(num_kernels, num_kernels);

    /* ensure right dimensions */
    REQUIRE(statistic.vlen==num_kernels, "%s::compute_statistic_and_Q: "
            "statistic vector size (%d) does not match number of kernels (%d)\n",
            get_name(), statistic.vlen, num_kernels);

    REQUIRE(Q.num_rows==num_kernels, "%s::compute_statistic_and_Q: "
            "Q number of rows (%d) does not match number of kernels (%d)\n",
            get_name(), Q.num_rows, num_kernels);

    REQUIRE(Q.num_cols==num_kernels, "%s::compute_statistic_and_Q: "
            "Q number of columns (%d) does not match number of kernels (%d)\n",
            get_name(), Q.num_cols, num_kernels);

    /* initialise statistic and Q since they are cumulative */
    statistic.zero();
    Q.zero();

    /* produce two kernel lists to iterate doubly nested */
    CList* list_i=new CList();
    CList* list_j=new CList();

    for (index_t k_idx=0; k_idx<combined->get_num_kernels(); k_idx++)
    {
        CKernel* kernel=combined->get_kernel(k_idx);
        list_i->append_element(kernel);
        list_j->append_element(kernel);
        SG_UNREF(kernel);
    }

    /* needed for online mean and variance */
    SGVector<index_t> term_counters_statistic(num_kernels);
    SGMatrix<index_t> term_counters_Q(num_kernels, num_kernels);
    term_counters_statistic.set_const(1);
    term_counters_Q.set_const(1);

    index_t num_examples_processed=0;
    while (num_examples_processed<m_4)
    {
        /* number of examples to look at in this iteration */
        index_t num_this_run=CMath::min(m_blocksize,
                CMath::max(0, m_4-num_examples_processed));
        SG_DEBUG("processing %d more examples. %d so far processed. Blocksize "
                "is %d\n", num_this_run, num_examples_processed, m_blocksize);

        /* stream data from both distributions */
        CFeatures* p1a=m_streaming_p->get_streamed_features(num_this_run);
        CFeatures* p1b=m_streaming_p->get_streamed_features(num_this_run);
        CFeatures* p2a=m_streaming_p->get_streamed_features(num_this_run);
        CFeatures* p2b=m_streaming_p->get_streamed_features(num_this_run);
        CFeatures* q1a=m_streaming_q->get_streamed_features(num_this_run);
        CFeatures* q1b=m_streaming_q->get_streamed_features(num_this_run);
        CFeatures* q2a=m_streaming_q->get_streamed_features(num_this_run);
        CFeatures* q2b=m_streaming_q->get_streamed_features(num_this_run);

        /* check whether h0 should be simulated and permute if so */
        if (m_simulate_h0)
        {
            /* create merged copy of all feature instances to permute */
            CList* list=new CList();
            list->append_element(p1b);
            list->append_element(p2a);
            list->append_element(p2b);
            list->append_element(q1a);
            list->append_element(q1b);
            list->append_element(q2a);
            list->append_element(q2b);
            CFeatures* merged=p1a->create_merged_copy(list);
            SG_UNREF(list);

            /* permute */
            SGVector<index_t> inds(merged->get_num_vectors());
            inds.range_fill();
            inds.permute();
            merged->add_subset(inds);

            /* copy back, replacing old features */
            SG_UNREF(p1a);
            SG_UNREF(p1b);
            SG_UNREF(p2a);
            SG_UNREF(p2b);
            SG_UNREF(q1a);
            SG_UNREF(q1b);
            SG_UNREF(q2a);
            SG_UNREF(q2b);

            SGVector<index_t> copy(num_this_run);
            copy.range_fill();
            p1a=merged->copy_subset(copy);
            copy.add(num_this_run);
            p1b=merged->copy_subset(copy);
            copy.add(num_this_run);
            p2a=merged->copy_subset(copy);
            copy.add(num_this_run);
            p2b=merged->copy_subset(copy);
            copy.add(num_this_run);
            q1a=merged->copy_subset(copy);
            copy.add(num_this_run);
            q1b=merged->copy_subset(copy);
            copy.add(num_this_run);
            q2a=merged->copy_subset(copy);
            copy.add(num_this_run);
            q2b=merged->copy_subset(copy);

            /* clean up; note that copy_subset does an SG_REF */
            SG_UNREF(merged);
        }
        else
        {
            /* reference the produced features (only needed if copy_subset was not used) */
            SG_REF(p1a);
            SG_REF(p1b);
            SG_REF(p2a);
            SG_REF(p2b);
            SG_REF(q1a);
            SG_REF(q1b);
            SG_REF(q2a);
            SG_REF(q2b);
        }

        /* now for each of these streamed data instances, iterate through all
         * kernels and update the Q matrix while also computing the MMD statistic */

        /* preallocate some memory for faster processing */
        SGVector<float64_t> pp(num_this_run);
        SGVector<float64_t> qq(num_this_run);
        SGVector<float64_t> pq(num_this_run);
        SGVector<float64_t> qp(num_this_run);
        SGVector<float64_t> h_i_a(num_this_run);
        SGVector<float64_t> h_i_b(num_this_run);
        SGVector<float64_t> h_j_a(num_this_run);
        SGVector<float64_t> h_j_b(num_this_run);

        /* iterate through Q matrix and update values, compute mmd */
        CKernel* kernel_i=(CKernel*)list_i->get_first_element();
        for (index_t i=0; i<num_kernels; ++i)
        {
            /* compute all necessary 8 h-vectors for this burst:
             * h_delta-terms for each kernel, expression 7 of the NIPS paper,
             * first kernel */
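            /* A sketch of what the doubly nested loop estimates (the exact
             * normalisation is given in the NIPS paper cited above and not
             * restated here): for kernel i, h_i_a and h_i_b are two
             * independent copies of the linear-time h-statistic computed on
             * disjoint blocks. The running average of
             * (h_i_a-h_i_b)*(h_j_a-h_j_b) below therefore estimates, up to a
             * constant factor, the covariance between the h-statistics of
             * kernels i and j, and Q collects these estimates for use in
             * kernel weight selection. */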

            /* first kernel, a-part */
            kernel_i->init(p1a, p2a);
            pp=kernel_i->get_kernel_diagonal(pp);
            kernel_i->init(q1a, q2a);
            qq=kernel_i->get_kernel_diagonal(qq);
            kernel_i->init(p1a, q2a);
            pq=kernel_i->get_kernel_diagonal(pq);
            kernel_i->init(q1a, p2a);
            qp=kernel_i->get_kernel_diagonal(qp);
            for (index_t it=0; it<num_this_run; ++it)
                h_i_a[it]=pp[it]+qq[it]-pq[it]-qp[it];

            /* first kernel, b-part */
            kernel_i->init(p1b, p2b);
            pp=kernel_i->get_kernel_diagonal(pp);
            kernel_i->init(q1b, q2b);
            qq=kernel_i->get_kernel_diagonal(qq);
            kernel_i->init(p1b, q2b);
            pq=kernel_i->get_kernel_diagonal(pq);
            kernel_i->init(q1b, p2b);
            qp=kernel_i->get_kernel_diagonal(qp);
            for (index_t it=0; it<num_this_run; ++it)
                h_i_b[it]=pp[it]+qq[it]-pq[it]-qp[it];

            /* iterate through j, but use symmetry in order to save half of
             * the computations */
            CKernel* kernel_j=(CKernel*)list_j->get_first_element();
            for (index_t j=0; j<=i; ++j)
            {
                /* compute all necessary 8 h-vectors for this burst:
                 * h_delta-terms for each kernel, expression 7 of the NIPS
                 * paper, second kernel */

                /* second kernel, a-part */
                kernel_j->init(p1a, p2a);
                pp=kernel_j->get_kernel_diagonal(pp);
                kernel_j->init(q1a, q2a);
                qq=kernel_j->get_kernel_diagonal(qq);
                kernel_j->init(p1a, q2a);
                pq=kernel_j->get_kernel_diagonal(pq);
                kernel_j->init(q1a, p2a);
                qp=kernel_j->get_kernel_diagonal(qp);
                for (index_t it=0; it<num_this_run; ++it)
                    h_j_a[it]=pp[it]+qq[it]-pq[it]-qp[it];

                /* second kernel, b-part */
                kernel_j->init(p1b, p2b);
                pp=kernel_j->get_kernel_diagonal(pp);
                kernel_j->init(q1b, q2b);
                qq=kernel_j->get_kernel_diagonal(qq);
                kernel_j->init(p1b, q2b);
                pq=kernel_j->get_kernel_diagonal(pq);
                kernel_j->init(q1b, p2b);
                qp=kernel_j->get_kernel_diagonal(qp);
                for (index_t it=0; it<num_this_run; ++it)
                    h_j_b[it]=pp[it]+qq[it]-pq[it]-qp[it];

                float64_t term;
                for (index_t it=0; it<num_this_run; ++it)
                {
                    /* current term of expression 7 of the NIPS paper */
                    term=(h_i_a[it]-h_i_b[it])*(h_j_a[it]-h_j_b[it]);

                    /* update covariance element for the current burst. This
                     * is a running average of the product of the h_delta
                     * terms of each kernel */
                    Q(i, j)+=(term-Q(i, j))/term_counters_Q(i, j)++;
                }

                /* use symmetry */
                Q(j, i)=Q(i, j);

                /* next kernel j */
                kernel_j=(CKernel*)list_j->get_next_element();
            }

            /* update online computation of the MMD statistic for kernel i,
             * using the vectors that were computed above */
            for (index_t it=0; it<num_this_run; ++it)
            {
                /* update statistic for kernel i (outer loop) using all
                 * elements of the h_i_a and h_i_b vectors (iterate over it) */
                statistic[i]=statistic[i]+
                        (h_i_a[it]-statistic[i])/term_counters_statistic[i]++;

                /* make sure to use all data, i.e. parts a and b */
                statistic[i]=statistic[i]+
                        (h_i_b[it]-statistic[i])/term_counters_statistic[i]++;
            }

            /* next kernel i */
            kernel_i=(CKernel*)list_i->get_next_element();
        }

        /* clean up streamed data */
        SG_UNREF(p1a);
        SG_UNREF(p1b);
        SG_UNREF(p2a);
        SG_UNREF(p2b);
        SG_UNREF(q1a);
        SG_UNREF(q1b);
        SG_UNREF(q2a);
        SG_UNREF(q2b);

        /* add number of processed examples for this run */
        num_examples_processed+=num_this_run;
    }

    /* clean up */
    SG_UNREF(list_i);
    SG_UNREF(list_j);

    SG_DEBUG("Done computing statistic, processed 4*%d examples.\n",
            num_examples_processed);

    SG_DEBUG("leaving %s::compute_statistic_and_Q()\n", get_name())
}

float64_t CLinearTimeMMD::compute_statistic()
{
    /* use wrapper method and compute for single kernel */
    SGVector<float64_t> statistic;
    SGVector<float64_t> variance;
    compute_statistic_and_variance(statistic, variance, false);

    return statistic[0];
}

SGVector<float64_t> CLinearTimeMMD::compute_statistic(
        bool multiple_kernels)
{
    /* make sure the multiple_kernels flag is used only with a combined kernel */
    REQUIRE(!multiple_kernels || m_kernel->get_kernel_type()==K_COMBINED,
            "%s::compute_statistic: multiple kernels specified, "
            "but underlying kernel is not of type K_COMBINED\n", get_name());

    SGVector<float64_t> statistic;
    SGVector<float64_t> variance;
    compute_statistic_and_variance(statistic, variance, multiple_kernels);

    return statistic;
}

float64_t CLinearTimeMMD::compute_variance_estimate()
{
    /* use wrapper method and compute for single kernel */
    SGVector<float64_t> statistic;
    SGVector<float64_t> variance;
    compute_statistic_and_variance(statistic, variance, false);

    return variance[0];
}

float64_t CLinearTimeMMD::compute_p_value(float64_t statistic)
{
    float64_t result=0;

    switch (m_null_approximation_method)
    {
    case MMD1_GAUSSIAN:
    {
        /* compute variance and use it to estimate the Gaussian null distribution */
        float64_t std_dev=CMath::sqrt(compute_variance_estimate());
        result=1.0-CStatistics::normal_cdf(statistic, std_dev);
    }
    break;

    default:
        /* bootstrapping is handled in the base class */
        result=CKernelTwoSampleTestStatistic::compute_p_value(statistic);
        break;
    }

    return result;
}

float64_t CLinearTimeMMD::compute_threshold(float64_t alpha)
{
    float64_t result=0;

    switch (m_null_approximation_method)
    {
    case MMD1_GAUSSIAN:
    {
        /* compute variance and use it to estimate the Gaussian null distribution */
        float64_t std_dev=CMath::sqrt(compute_variance_estimate());
        result=1.0-CStatistics::inverse_normal_cdf(1-alpha, 0, std_dev);
    }
    break;

    default:
        /* bootstrapping is handled in the base class */
        result=CKernelTwoSampleTestStatistic::compute_threshold(alpha);
        break;
    }

    return result;
}

float64_t CLinearTimeMMD::perform_test()
{
    float64_t result=0;

    switch (m_null_approximation_method)
    {
    case MMD1_GAUSSIAN:
    {
        /* compute variance and use it to estimate the Gaussian null
         * distribution; use the wrapper method and compute for a single
         * kernel */
        SGVector<float64_t> statistic;
        SGVector<float64_t> variance;
        compute_statistic_and_variance(statistic, variance, false);

        /* estimate Gaussian distribution */
        result=1.0-CStatistics::normal_cdf(statistic[0],
                CMath::sqrt(variance[0]));
    }
    break;

    default:
        /* bootstrapping can be done separately in the superclass */
        result=CKernelTwoSampleTestStatistic::perform_test();
        break;
    }

    return result;
}

SGVector<float64_t> CLinearTimeMMD::bootstrap_null()
{
    SGVector<float64_t> samples(m_bootstrap_iterations);

    /* instead of permuting samples, just sample new data all the time */
    CStreamingFeatures* p=m_streaming_p;
    CStreamingFeatures* q=m_streaming_q;
    SG_REF(p);
    SG_REF(q);

    bool old=m_simulate_h0;
    set_simulate_h0(true);
    for (index_t i=0; i<m_bootstrap_iterations; ++i)
    {
        /* compute statistic for this permutation of mixed samples */
        samples[i]=compute_statistic();
    }
    set_simulate_h0(old);
    m_streaming_p=p;
    m_streaming_q=q;
    SG_UNREF(p);
    SG_UNREF(q);

    return samples;
}

void CLinearTimeMMD::set_p_and_q(CFeatures* p, CFeatures* q)
{
    SG_ERROR("%s::set_p_and_q(): Method not implemented since linear time mmd"
            " is based on streaming features\n", get_name());
}

CList* CLinearTimeMMD::get_p_and_q()
{
    SG_ERROR("%s::get_p_and_q(): Method not implemented since linear time mmd"
            " is based on streaming features\n", get_name());
    return NULL;
}

CStreamingFeatures* CLinearTimeMMD::get_streaming_p()
{
    SG_REF(m_streaming_p);
    return m_streaming_p;
}

CStreamingFeatures* CLinearTimeMMD::get_streaming_q()
{
    SG_REF(m_streaming_q);
    return m_streaming_q;
}
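
A minimal usage sketch, based only on the interface visible in this file; the
kernel and the two streaming feature objects (here called kernel, gen_p and
gen_q, e.g. a CGaussianKernel and two CStreamingDenseFeatures instances) are
assumed to have been created elsewhere, m is the number of samples to stream
from each distribution, and 10000 is the block size:

    CLinearTimeMMD* mmd=new CLinearTimeMMD(kernel, gen_p, gen_q, m, 10000);

    /* linear time MMD statistic for the single kernel */
    float64_t statistic=mmd->compute_statistic();

    /* p-value under the configured null approximation: MMD1_GAUSSIAN uses the
     * variance estimate computed alongside the statistic, the default falls
     * back to bootstrapping via freshly streamed data */
    float64_t p_value=mmd->compute_p_value(statistic);

    SG_UNREF(mmd);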
