SHOGUN  4.2.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
HMM.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
13 #include <shogun/io/SGIO.h>
14 #include <shogun/lib/config.h>
15 #include <shogun/lib/Signal.h>
16 #include <shogun/base/Parallel.h>
19 
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <time.h>
23 #include <ctype.h>
24 
// Log of default_value, or log of a random draw from [MIN_RAND, MAX_RAND]
// when default_value is 0 (used to initialise model parameters).
#define VAL_MACRO log((default_value == 0) ? (CMath::random(MIN_RAND, MAX_RAND)) : default_value)
// Capacity of the learn_*/const_* tables allocated in Model.
// NOTE(review): 65336 looks like a typo for 65536 (2^16) -- confirm upstream.
#define ARRAY_SIZE 65336
27 
28 using namespace shogun;
29 
31 // Construction/Destruction
33 
// Bit flags recording which sections of an HMM definition file were
// successfully parsed: state count N, alphabet size M, order O,
// transitions a, emissions b, initial probs p, end probs q.
const int32_t CHMM::GOTN= (1<<1);
const int32_t CHMM::GOTM= (1<<2);
const int32_t CHMM::GOTO= (1<<3);
const int32_t CHMM::GOTa= (1<<4);
const int32_t CHMM::GOTb= (1<<5);
const int32_t CHMM::GOTp= (1<<6);
const int32_t CHMM::GOTq= (1<<7);

// Bit flags for sections of a model-definition file that mark parameters
// as learned (re-estimated) vs. held constant during training.
const int32_t CHMM::GOTlearn_a= (1<<1);
const int32_t CHMM::GOTlearn_b= (1<<2);
const int32_t CHMM::GOTlearn_p= (1<<3);
const int32_t CHMM::GOTlearn_q= (1<<4);
const int32_t CHMM::GOTconst_a= (1<<5);
const int32_t CHMM::GOTconst_b= (1<<6);
const int32_t CHMM::GOTconst_p= (1<<7);
const int32_t CHMM::GOTconst_q= (1<<8);
50 
51 enum E_STATE
52 {
71 };
72 
73 
#ifdef FIX_POS
// Per-position state constraints: a state can be explicitly disallowed,
// explicitly allowed, or left at the default (unconstrained).
const char Model::FIX_DISALLOWED=0 ;
const char Model::FIX_ALLOWED=1 ;
const char Model::FIX_DEFAULT=-1 ;
// Log-domain penalty applied to disallowed state/position combinations.
const float64_t Model::DISALLOWED_PENALTY=CMath::ALMOST_NEG_INFTY ;
#endif
80 
// Model constructor body (the signature line is elided in this rendering):
// allocates the fixed-size tables that describe which HMM parameters are
// learned and which are clamped to constants, then initialises every slot
// to its "unused" sentinel (-1 index, 1.0 value).
{
	// indices of transitions/emissions/start/end probs held constant
	const_a=SG_MALLOC(int, ARRAY_SIZE);
	const_b=SG_MALLOC(int, ARRAY_SIZE);
	const_p=SG_MALLOC(int, ARRAY_SIZE);
	const_q=SG_MALLOC(int, ARRAY_SIZE);
	// the constant values corresponding to the index tables above
	const_a_val=SG_MALLOC(float64_t, ARRAY_SIZE);
	const_b_val=SG_MALLOC(float64_t, ARRAY_SIZE);
	const_p_val=SG_MALLOC(float64_t, ARRAY_SIZE);
	const_q_val=SG_MALLOC(float64_t, ARRAY_SIZE);


	// indices of parameters that are re-estimated during training
	learn_a=SG_MALLOC(int, ARRAY_SIZE);
	learn_b=SG_MALLOC(int, ARRAY_SIZE);
	learn_p=SG_MALLOC(int, ARRAY_SIZE);
	learn_q=SG_MALLOC(int, ARRAY_SIZE);

#ifdef FIX_POS
	fix_pos_state = SG_MALLOC(char, ARRAY_SIZE);
#endif
	// mark every slot unused
	for (int32_t i=0; i<ARRAY_SIZE; i++)
	{
		const_a[i]=-1 ;
		const_b[i]=-1 ;
		const_p[i]=-1 ;
		const_q[i]=-1 ;
		const_a_val[i]=1.0 ;
		const_b_val[i]=1.0 ;
		const_p_val[i]=1.0 ;
		const_q_val[i]=1.0 ;
		learn_a[i]=-1 ;
		learn_b[i]=-1 ;
		learn_p[i]=-1 ;
		learn_q[i]=-1 ;
#ifdef FIX_POS
		fix_pos_state[i] = FIX_DEFAULT ;
#endif
	} ;
}
120 
// Model destructor body (signature elided in this rendering): releases
// every table allocated by the constructor; SG_FREE pairs with SG_MALLOC.
{
	SG_FREE(const_a);
	SG_FREE(const_b);
	SG_FREE(const_p);
	SG_FREE(const_q);
	SG_FREE(const_a_val);
	SG_FREE(const_b_val);
	SG_FREE(const_p_val);
	SG_FREE(const_q_val);

	SG_FREE(learn_a);
	SG_FREE(learn_b);
	SG_FREE(learn_p);
	SG_FREE(learn_q);

#ifdef FIX_POS
	SG_FREE(fix_pos_state);
#endif

}
142 
// Default CHMM constructor body (signature elided in this rendering):
// establishes a fully zeroed/NULL state -- nothing is allocated until a
// model or observations are supplied.
{
	N=0;
	M=0;
	model=NULL;
	status=false;
	p_observations=NULL;
	trans_list_forward_cnt=NULL;
	trans_list_backward_cnt=NULL;
	trans_list_forward=NULL;
	trans_list_backward=NULL;
	trans_list_forward_val=NULL;
	iterations=150;   // default Baum-Welch iteration cap
	epsilon=1e-4;     // default convergence threshold
	conv_it=5;        // iterations the threshold must hold
	path=NULL;
	arrayN1=NULL;
	arrayN2=NULL;
	reused_caches=false;
	transition_matrix_a=NULL;
	// NOTE(review): lines elided in this rendering here presumably NULL
	// the remaining matrices (observation_matrix_b, p, q) -- confirm.
#ifdef USE_LOGSUMARRAY
	arrayS = NULL;
#endif
#ifdef USE_HMMPARALLEL_STRUCTURES
	this->alpha_cache=NULL;
	this->beta_cache=NULL;
	path_prob_updated = NULL;
	path_prob_dimension = NULL;
#else
	this->alpha_cache.table=NULL;
	this->beta_cache.table=NULL;
	this->alpha_cache.dimension=0;
	this->beta_cache.dimension=0;
#endif
	mem_initialized = false;
}
183 
// Apparently the copy constructor (the `CHMM::CHMM(CHMM* h)` signature
// line is elided in this rendering): clones dimensions and parameters
// from h after allocating fresh tables via initialize_hmm().
: CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
{
#ifdef USE_HMMPARALLEL_STRUCTURES
	SG_INFO("hmm is using %i separate tables\n", parallel->get_num_threads())
#endif

	this->N=h->get_N();
	this->M=h->get_M();
	status=initialize_hmm(NULL, h->get_pseudo());
	this->copy_model(h);
	// NOTE(review): a line is elided here in this rendering (after
	// copy_model) -- possibly set_observations()/invalidate_model();
	// confirm against upstream source.
}
197 
198 CHMM::CHMM(int32_t p_N, int32_t p_M, Model* p_model, float64_t p_PSEUDO)
199 : CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
200 {
201  this->N=p_N;
202  this->M=p_M;
203  model=NULL ;
204 
205 #ifdef USE_HMMPARALLEL_STRUCTURES
206  SG_INFO("hmm is using %i separate tables\n", parallel->get_num_threads())
207 #endif
208 
209  status=initialize_hmm(p_model, p_PSEUDO);
210 }
211 
// Constructor taking string-word observations directly (the leading
// `CHMM::CHMM(` signature line is elided in this rendering).
	CStringFeatures<uint16_t>* obs, int32_t p_N, int32_t p_M,
	float64_t p_PSEUDO)
: CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
{
	this->N=p_N;
	this->M=p_M;
	model=NULL ;

#ifdef USE_HMMPARALLEL_STRUCTURES
	SG_INFO("hmm is using %i separate tables\n", parallel->get_num_threads())
#endif

	// NOTE(review): unlike the sibling constructors, the return value of
	// initialize_hmm() is NOT stored in `status` here, and the just-NULLed
	// `model` member is passed instead of a caller-supplied Model --
	// looks like an oversight; confirm against upstream.
	initialize_hmm(model, p_PSEUDO);
	set_observations(obs);
}
228 
// Construct a p_N-state HMM without emissions (M=0).
// NOTE(review): parameters p, q and a are not referenced in the visible
// lines of this body, and a few lines are elided in this rendering --
// confirm against upstream whether they are used at all.
CHMM::CHMM(int32_t p_N, float64_t* p, float64_t* q, float64_t* a)
: CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
{
	this->N=p_N;
	this->M=0;  // no emission alphabet: pure state-sequence model
	model=NULL ;

	trans_list_forward = NULL ;
	trans_list_forward_cnt = NULL ;
	trans_list_forward_val = NULL ;
	trans_list_backward = NULL ;
	trans_list_backward_cnt = NULL ;
	trans_list_len = 0 ;
	mem_initialized = false ;

	this->transition_matrix_a=NULL;
	this->observation_matrix_b=NULL;
	this->initial_state_distribution_p=NULL;
	this->end_state_distribution_q=NULL;
	this->p_observations=NULL;
	this->reused_caches=false;

#ifdef USE_HMMPARALLEL_STRUCTURES
	this->alpha_cache=NULL;
	this->beta_cache=NULL;
#else
	this->alpha_cache.table=NULL;
	this->beta_cache.table=NULL;
	this->alpha_cache.dimension=0;
	this->beta_cache.dimension=0;
#endif

	this->states_per_observation_psi=NULL ;
	this->path=NULL;
	arrayN1=NULL ;
	arrayN2=NULL ;

	this->loglikelihood=false;
	mem_initialized = true ;

	// NOTE(review): observation_matrix_b is NULLed a second time below
	// (already NULLed above); the neighbouring lines are elided in this
	// rendering -- confirm upstream.
	observation_matrix_b=NULL ;
	transition_matrix_A=NULL ;
	observation_matrix_B=NULL ;

// this->invalidate_model();
}
278 
// Construct an HMM over p_N states from an explicit sparse transition
// list (the leading `CHMM::CHMM(` signature line is elided in this
// rendering).
//
// a_trans is read as a num_trans x 3 column-major table:
//   a_trans[i]               target state of transition i
//   a_trans[i+num_trans]     source state of transition i
//   a_trans[i+2*num_trans]   (log) transition value
// Rows must be grouped and sorted by source state; the ASSERTs below
// spot-check that ordering.
// NOTE(review): parameters p and q are not referenced in the visible
// lines of this body -- confirm upstream whether elided lines use them.
	int32_t p_N, float64_t* p, float64_t* q, int32_t num_trans,
	float64_t* a_trans)
: CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
{
	model=NULL ;

	this->N=p_N;
	this->M=0;

	trans_list_forward = NULL ;
	trans_list_forward_cnt = NULL ;
	trans_list_forward_val = NULL ;
	trans_list_backward = NULL ;
	trans_list_backward_cnt = NULL ;
	trans_list_len = 0 ;
	mem_initialized = false ;

	this->transition_matrix_a=NULL;
	this->observation_matrix_b=NULL;
	this->initial_state_distribution_p=NULL;
	this->end_state_distribution_q=NULL;
	this->p_observations=NULL;
	this->reused_caches=false;

#ifdef USE_HMMPARALLEL_STRUCTURES
	this->alpha_cache=NULL;
	this->beta_cache=NULL;
#else
	this->alpha_cache.table=NULL;
	this->beta_cache.table=NULL;
	this->alpha_cache.dimension=0;
	this->beta_cache.dimension=0;
#endif

	this->states_per_observation_psi=NULL ;
	this->path=NULL;
	arrayN1=NULL ;
	arrayN2=NULL ;

	this->loglikelihood=false;
	mem_initialized = true ;

	trans_list_forward_cnt=NULL ;
	trans_list_len = N ;
	trans_list_forward = SG_MALLOC(T_STATES*, N);
	trans_list_forward_val = SG_MALLOC(float64_t*, N);
	trans_list_forward_cnt = SG_MALLOC(T_STATES, N);

	// first pass: count how many transitions leave each state j and
	// allocate per-state arrays of exactly that length
	int32_t start_idx=0;
	for (int32_t j=0; j<N; j++)
	{
		int32_t old_start_idx=start_idx;

		while (start_idx<num_trans && a_trans[start_idx+num_trans]==j)
		{
			start_idx++;

			if (start_idx>1 && start_idx<num_trans)
				ASSERT(a_trans[start_idx+num_trans-1]<=
					a_trans[start_idx+num_trans]);
		}

		if (start_idx>1 && start_idx<num_trans)
			ASSERT(a_trans[start_idx+num_trans-1]<=
				a_trans[start_idx+num_trans]);

		int32_t len=start_idx-old_start_idx;
		ASSERT(len>=0)

		trans_list_forward_cnt[j] = 0 ;

		if (len>0)
		{
			trans_list_forward[j] = SG_MALLOC(T_STATES, len);
			trans_list_forward_val[j] = SG_MALLOC(float64_t, len);
		}
		else
		{
			trans_list_forward[j] = NULL;
			trans_list_forward_val[j] = NULL;
		}
	}

	// second pass: scatter each transition into its source state's list
	for (int32_t i=0; i<num_trans; i++)
	{
		int32_t from = (int32_t)a_trans[i+num_trans] ;
		int32_t to = (int32_t)a_trans[i] ;
		float64_t val = a_trans[i+num_trans*2] ;

		ASSERT(from>=0 && from<N)
		ASSERT(to>=0 && to<N)

		trans_list_forward[from][trans_list_forward_cnt[from]]=to ;
		trans_list_forward_val[from][trans_list_forward_cnt[from]]=val ;
		trans_list_forward_cnt[from]++ ;
		//ASSERT(trans_list_forward_cnt[from]<3000)
	} ;

	transition_matrix_a=NULL ;
	observation_matrix_b=NULL ;
	// NOTE(review): two lines are elided here in this rendering
	// (presumably NULLing initial_state_distribution_p / q) -- confirm.
	transition_matrix_A=NULL ;
	observation_matrix_B=NULL ;

// this->invalidate_model();
}
387 
388 
389 CHMM::CHMM(FILE* model_file, float64_t p_PSEUDO)
390 : CDistribution(), iterations(150), epsilon(1e-4), conv_it(5)
391 {
392 #ifdef USE_HMMPARALLEL_STRUCTURES
393  SG_INFO("hmm is using %i separate tables\n", parallel->get_num_threads())
394 #endif
395 
396  status=initialize_hmm(NULL, p_PSEUDO, model_file);
397 }
398 
// CHMM destructor body (signature elided in this rendering): tears down
// the sparse transition lists, the alpha/beta caches and the Viterbi
// path storage. NOTE(review): a line elided near the top presumably
// unrefs/releases p_observations -- confirm upstream.
{

	// per-state transition lists (forward/backward) and their counters
	if (trans_list_forward_cnt)
		SG_FREE(trans_list_forward_cnt);
	if (trans_list_backward_cnt)
		SG_FREE(trans_list_backward_cnt);
	if (trans_list_forward)
	{
		for (int32_t i=0; i<trans_list_len; i++)
			if (trans_list_forward[i])
				SG_FREE(trans_list_forward[i]);
		SG_FREE(trans_list_forward);
	}
	if (trans_list_forward_val)
	{
		for (int32_t i=0; i<trans_list_len; i++)
			if (trans_list_forward_val[i])
				SG_FREE(trans_list_forward_val[i]);
		SG_FREE(trans_list_forward_val);
	}
	if (trans_list_backward)
	{
		for (int32_t i=0; i<trans_list_len; i++)
			if (trans_list_backward[i])
				SG_FREE(trans_list_backward[i]);
		SG_FREE(trans_list_backward);
	} ;

	// caches are only owned (and thus freed) when they were not borrowed
	// from another HMM instance
	if (!reused_caches)
	{
#ifdef USE_HMMPARALLEL_STRUCTURES
		if (mem_initialized)
		{
			for (int32_t i=0; i<parallel->get_num_threads(); i++)
			{
				SG_FREE(alpha_cache[i].table);
				SG_FREE(beta_cache[i].table);
				alpha_cache[i].table=NULL;
				beta_cache[i].table=NULL;
			}
		}
		SG_FREE(alpha_cache);
		SG_FREE(beta_cache);
		alpha_cache=NULL;
		beta_cache=NULL;
#else // USE_HMMPARALLEL_STRUCTURES
		SG_FREE(alpha_cache.table);
		SG_FREE(beta_cache.table);
		alpha_cache.table=NULL;
		beta_cache.table=NULL;
#endif // USE_HMMPARALLEL_STRUCTURES

	}

#ifdef USE_LOGSUMARRAY
#ifdef USE_HMMPARALLEL_STRUCTURES
	{
		if (mem_initialized)
		{
			for (int32_t i=0; i<parallel->get_num_threads(); i++)
				SG_FREE(arrayS[i]);
		}
		SG_FREE(arrayS);
	} ;
#else //USE_HMMPARALLEL_STRUCTURES
	SG_FREE(arrayS);
#endif //USE_HMMPARALLEL_STRUCTURES
#endif //USE_LOGSUMARRAY

	if (!reused_caches)
	{
#ifdef USE_HMMPARALLEL_STRUCTURES
		if (mem_initialized)
		{
			SG_FREE(path_prob_updated);
			SG_FREE(path_prob_dimension);
			for (int32_t i=0; i<parallel->get_num_threads(); i++)
				SG_FREE(path[i]);
		}
#endif //USE_HMMPARALLEL_STRUCTURES
		SG_FREE(path);
	}
}
488 
// Body of a training entry point (signature elided in this rendering,
// apparently `bool CHMM::train(CFeatures* data)`): validates that the
// supplied features are string features of type word before use.
{
	if (data)
	{
		if (data->get_feature_class() != C_STRING ||
				data->get_feature_type() != F_WORD)
		{
			SG_ERROR("Expected features of class string type word\n")
		}
		// NOTE(review): a line elided here presumably calls
		// set_observations((CStringFeatures<uint16_t>*) data) -- confirm.
	}
	// NOTE(review): the return statement is on an elided line -- confirm.
}
502 
// Body of what appears to be alloc_state_dependend_arrays() (signature
// elided in this rendering): allocates every N/M-sized working buffer
// and reports whether all allocations succeeded.
{

	// NOTE(review): the guard condition of this block is elided in this
	// rendering (presumably "if (!transition_matrix_a)") -- confirm.
	{
		transition_matrix_a=SG_MALLOC(float64_t, N*N);
		observation_matrix_b=SG_MALLOC(float64_t, N*M);
		// (a line allocating initial_state_distribution_p is elided here)
		end_state_distribution_q=SG_MALLOC(float64_t, N);
		// (an elided line here precedes convert_to_log -- confirm upstream)
		convert_to_log();
	}

	// scratch rows of length N used by forward/backward/viterbi
#ifdef USE_HMMPARALLEL_STRUCTURES
	for (int32_t i=0; i<parallel->get_num_threads(); i++)
	{
		arrayN1[i]=SG_MALLOC(float64_t, N);
		arrayN2[i]=SG_MALLOC(float64_t, N);
	}
#else //USE_HMMPARALLEL_STRUCTURES
	arrayN1=SG_MALLOC(float64_t, N);
	arrayN2=SG_MALLOC(float64_t, N);
#endif //USE_HMMPARALLEL_STRUCTURES

#ifdef LOG_SUMARRAY
#ifdef USE_HMMPARALLEL_STRUCTURES
	for (int32_t i=0; i<parallel->get_num_threads(); i++)
		arrayS[i]=SG_MALLOC(float64_t, (int32_t)(this->N/2+1));
#else //USE_HMMPARALLEL_STRUCTURES
	arrayS=SG_MALLOC(float64_t, (int32_t)(this->N/2+1));
#endif //USE_HMMPARALLEL_STRUCTURES
#endif //LOG_SUMARRAY
	transition_matrix_A=SG_MALLOC(float64_t, this->N*this->N);
	observation_matrix_B=SG_MALLOC(float64_t, this->N*this->M);

	if (p_observations)
	{
		// NOTE(review): both branch bodies below are on elided lines
		// (presumably set_observation_nocache()/set_observations()).
#ifdef USE_HMMPARALLEL_STRUCTURES
		if (alpha_cache[0].table!=NULL)
#else //USE_HMMPARALLEL_STRUCTURES
		if (alpha_cache.table!=NULL)
#endif //USE_HMMPARALLEL_STRUCTURES
		else
	}

	this->invalidate_model();

	return ((transition_matrix_A != NULL) && (observation_matrix_B != NULL) &&
			(transition_matrix_a != NULL) && (observation_matrix_b != NULL) &&
			(initial_state_distribution_p != NULL) &&
			(end_state_distribution_q != NULL));
}
559 
// Body of what appears to be free_state_dependend_arrays() (signature
// elided in this rendering): releases the buffers allocated by the
// alloc counterpart and NULLs the pointers.
{
#ifdef USE_HMMPARALLEL_STRUCTURES
	if (arrayN1 && arrayN2)
	{
		for (int32_t i=0; i<parallel->get_num_threads(); i++)
		{
			SG_FREE(arrayN1[i]);
			SG_FREE(arrayN2[i]);

			arrayN1[i]=NULL;
			arrayN2[i]=NULL;
		}
	}
#endif
	SG_FREE(arrayN1);
	SG_FREE(arrayN2);
	arrayN1=NULL;
	arrayN2=NULL;

	// NOTE(review): the guard condition of this block is elided in this
	// rendering -- confirm upstream.
	{
		SG_FREE(transition_matrix_A);
		SG_FREE(observation_matrix_B);
		SG_FREE(transition_matrix_a);
		SG_FREE(observation_matrix_b);
		// (a line freeing initial_state_distribution_p is elided here)
		SG_FREE(end_state_distribution_q);
	} ;

	transition_matrix_A=NULL;
	// (elided lines here presumably NULL the remaining pointers)
	transition_matrix_a=NULL;
}
597 
/** Shared initialisation used by all constructors: resets every pointer
 *  member, allocates per-thread caches and Viterbi path storage,
 *  optionally parses a model definition file, and reports whether all
 *  allocations (and the file load) succeeded.
 *  @param m         optional learn/const parameter description (may be NULL)
 *  @param pseudo    pseudo-count stored in PSEUDO
 *  @param modelfile optional open definition file to parse
 *  @return true if the file (when given) parsed and all tables exist
 */
bool CHMM::initialize_hmm(Model* m, float64_t pseudo, FILE* modelfile)
{
	//yes optimistic
	bool files_ok=true;

	trans_list_forward = NULL ;
	trans_list_forward_cnt = NULL ;
	trans_list_forward_val = NULL ;
	trans_list_backward = NULL ;
	trans_list_backward_cnt = NULL ;
	trans_list_len = 0 ;
	mem_initialized = false ;

	this->transition_matrix_a=NULL;
	this->observation_matrix_b=NULL;
	this->initial_state_distribution_p=NULL;
	this->end_state_distribution_q=NULL;
	this->PSEUDO= pseudo;
	this->model= m;
	this->p_observations=NULL;
	this->reused_caches=false;

#ifdef USE_HMMPARALLEL_STRUCTURES
	// one alpha/beta cache per worker thread
	alpha_cache=SG_MALLOC(T_ALPHA_BETA, parallel->get_num_threads());
	beta_cache=SG_MALLOC(T_ALPHA_BETA, parallel->get_num_threads());
	// NOTE(review): a line is elided here in this rendering -- presumably
	// the allocation of states_per_observation_psi, which the loop below
	// indexes; confirm upstream.

	for (int32_t i=0; i<parallel->get_num_threads(); i++)
	{
		this->alpha_cache[i].table=NULL;
		this->beta_cache[i].table=NULL;
		this->alpha_cache[i].dimension=0;
		this->beta_cache[i].dimension=0;
		this->states_per_observation_psi[i]=NULL ;
	}

#else // USE_HMMPARALLEL_STRUCTURES
	this->alpha_cache.table=NULL;
	this->beta_cache.table=NULL;
	this->alpha_cache.dimension=0;
	this->beta_cache.dimension=0;
	this->states_per_observation_psi=NULL ;
#endif //USE_HMMPARALLEL_STRUCTURES

	if (modelfile)
		files_ok= files_ok && load_model(modelfile);

#ifdef USE_HMMPARALLEL_STRUCTURES
	path_prob_updated=SG_MALLOC(bool, parallel->get_num_threads());
	path_prob_dimension=SG_MALLOC(int, parallel->get_num_threads());

	path=SG_MALLOC(P_STATES, parallel->get_num_threads());

	for (int32_t i=0; i<parallel->get_num_threads(); i++)
		this->path[i]=NULL;

#else // USE_HMMPARALLEL_STRUCTURES
	this->path=NULL;

#endif //USE_HMMPARALLEL_STRUCTURES

#ifdef USE_HMMPARALLEL_STRUCTURES
	arrayN1=SG_MALLOC(float64_t*, parallel->get_num_threads());
	arrayN2=SG_MALLOC(float64_t*, parallel->get_num_threads());
#endif //USE_HMMPARALLEL_STRUCTURES

#ifdef LOG_SUMARRAY
#ifdef USE_HMMPARALLEL_STRUCTURES
	arrayS=SG_MALLOC(float64_t*, parallel->get_num_threads());
#endif // USE_HMMPARALLEL_STRUCTURES
#endif //LOG_SUMARRAY

	// NOTE(review): a line is elided here in this rendering -- possibly a
	// call to alloc_state_dependend_arrays(); confirm upstream.

	this->loglikelihood=false;
	mem_initialized = true ;
	this->invalidate_model();

	return	((files_ok) &&
			(transition_matrix_A != NULL) && (observation_matrix_B != NULL) &&
			(transition_matrix_a != NULL) && (observation_matrix_b != NULL) && (initial_state_distribution_p != NULL) &&
			(end_state_distribution_q != NULL));
}
681 
682 //------------------------------------------------------------------------------------//
683 
684 //forward algorithm
685 //calculates Pr[O_0,O_1, ..., O_t, q_time=S_i| lambda] for 0<= time <= T-1
686 //Pr[O|lambda] for time > T
687 float64_t CHMM::forward_comp(int32_t time, int32_t state, int32_t dimension)
688 {
689  T_ALPHA_BETA_TABLE* alpha_new;
690  T_ALPHA_BETA_TABLE* alpha;
691  T_ALPHA_BETA_TABLE* dummy;
692  if (time<1)
693  time=0;
694 
695 
696  int32_t wanted_time=time;
697 
698  if (ALPHA_CACHE(dimension).table)
699  {
700  alpha=&ALPHA_CACHE(dimension).table[0];
701  alpha_new=&ALPHA_CACHE(dimension).table[N];
702  time=p_observations->get_vector_length(dimension)+1;
703  }
704  else
705  {
706  alpha_new=(T_ALPHA_BETA_TABLE*)ARRAYN1(dimension);
707  alpha=(T_ALPHA_BETA_TABLE*)ARRAYN2(dimension);
708  }
709 
710  if (time<1)
711  return get_p(state) + get_b(state, p_observations->get_feature(dimension,0));
712  else
713  {
714  //initialization alpha_1(i)=p_i*b_i(O_1)
715  for (int32_t i=0; i<N; i++)
716  alpha[i] = get_p(i) + get_b(i, p_observations->get_feature(dimension,0)) ;
717 
718  //induction alpha_t+1(j) = (sum_i=1^N alpha_t(i)a_ij) b_j(O_t+1)
719  for (register int32_t t=1; t<time && t < p_observations->get_vector_length(dimension); t++)
720  {
721 
722  for (int32_t j=0; j<N; j++)
723  {
724  register int32_t i, num = trans_list_forward_cnt[j] ;
725  float64_t sum=-CMath::INFTY;
726  for (i=0; i < num; i++)
727  {
728  int32_t ii = trans_list_forward[j][i] ;
729  sum = CMath::logarithmic_sum(sum, alpha[ii] + get_a(ii,j));
730  } ;
731 
732  alpha_new[j]= sum + get_b(j, p_observations->get_feature(dimension,t));
733  }
734 
735  if (!ALPHA_CACHE(dimension).table)
736  {
737  dummy=alpha;
738  alpha=alpha_new;
739  alpha_new=dummy; //switch alpha/alpha_new
740  }
741  else
742  {
743  alpha=alpha_new;
744  alpha_new+=N; //perversely pointer arithmetic
745  }
746  }
747 
748 
749  if (time<p_observations->get_vector_length(dimension))
750  {
751  register int32_t i, num=trans_list_forward_cnt[state];
752  register float64_t sum=-CMath::INFTY;
753  for (i=0; i<num; i++)
754  {
755  int32_t ii = trans_list_forward[state][i] ;
756  sum= CMath::logarithmic_sum(sum, alpha[ii] + get_a(ii, state));
757  } ;
758 
759  return sum + get_b(state, p_observations->get_feature(dimension,time));
760  }
761  else
762  {
763  // termination
764  register int32_t i ;
765  float64_t sum ;
766  sum=-CMath::INFTY;
767  for (i=0; i<N; i++) //sum over all paths
768  sum=CMath::logarithmic_sum(sum, alpha[i] + get_q(i)); //to get model probability
769 
770  if (!ALPHA_CACHE(dimension).table)
771  return sum;
772  else
773  {
774  ALPHA_CACHE(dimension).dimension=dimension;
775  ALPHA_CACHE(dimension).updated=true;
776  ALPHA_CACHE(dimension).sum=sum;
777 
778  if (wanted_time<p_observations->get_vector_length(dimension))
779  return ALPHA_CACHE(dimension).table[wanted_time*N+state];
780  else
781  return ALPHA_CACHE(dimension).sum;
782  }
783  }
784  }
785 }
786 
787 
788 //forward algorithm
789 //calculates Pr[O_0,O_1, ..., O_t, q_time=S_i| lambda] for 0<= time <= T-1
790 //Pr[O|lambda] for time > T
791 float64_t CHMM::forward_comp_old(int32_t time, int32_t state, int32_t dimension)
792 {
793  T_ALPHA_BETA_TABLE* alpha_new;
794  T_ALPHA_BETA_TABLE* alpha;
795  T_ALPHA_BETA_TABLE* dummy;
796  if (time<1)
797  time=0;
798 
799  int32_t wanted_time=time;
800 
801  if (ALPHA_CACHE(dimension).table)
802  {
803  alpha=&ALPHA_CACHE(dimension).table[0];
804  alpha_new=&ALPHA_CACHE(dimension).table[N];
805  time=p_observations->get_vector_length(dimension)+1;
806  }
807  else
808  {
809  alpha_new=(T_ALPHA_BETA_TABLE*)ARRAYN1(dimension);
810  alpha=(T_ALPHA_BETA_TABLE*)ARRAYN2(dimension);
811  }
812 
813  if (time<1)
814  return get_p(state) + get_b(state, p_observations->get_feature(dimension,0));
815  else
816  {
817  //initialization alpha_1(i)=p_i*b_i(O_1)
818  for (int32_t i=0; i<N; i++)
819  alpha[i] = get_p(i) + get_b(i, p_observations->get_feature(dimension,0)) ;
820 
821  //induction alpha_t+1(j) = (sum_i=1^N alpha_t(i)a_ij) b_j(O_t+1)
822  for (register int32_t t=1; t<time && t < p_observations->get_vector_length(dimension); t++)
823  {
824 
825  for (int32_t j=0; j<N; j++)
826  {
827  register int32_t i ;
828 #ifdef USE_LOGSUMARRAY
829  for (i=0; i<(N>>1); i++)
830  ARRAYS(dimension)[i]=CMath::logarithmic_sum(alpha[i<<1] + get_a(i<<1,j),
831  alpha[(i<<1)+1] + get_a((i<<1)+1,j));
832  if (N%2==1)
833  alpha_new[j]=get_b(j, p_observations->get_feature(dimension,t))+
834  CMath::logarithmic_sum(alpha[N-1]+get_a(N-1,j),
835  CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
836  else
837  alpha_new[j]=get_b(j, p_observations->get_feature(dimension,t))+CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
838 #else //USE_LOGSUMARRAY
839  float64_t sum=-CMath::INFTY;
840  for (i=0; i<N; i++)
841  sum= CMath::logarithmic_sum(sum, alpha[i] + get_a(i,j));
842 
843  alpha_new[j]= sum + get_b(j, p_observations->get_feature(dimension,t));
844 #endif //USE_LOGSUMARRAY
845  }
846 
847  if (!ALPHA_CACHE(dimension).table)
848  {
849  dummy=alpha;
850  alpha=alpha_new;
851  alpha_new=dummy; //switch alpha/alpha_new
852  }
853  else
854  {
855  alpha=alpha_new;
856  alpha_new+=N; //perversely pointer arithmetic
857  }
858  }
859 
860 
861  if (time<p_observations->get_vector_length(dimension))
862  {
863  register int32_t i;
864 #ifdef USE_LOGSUMARRAY
865  for (i=0; i<(N>>1); i++)
866  ARRAYS(dimension)[i]=CMath::logarithmic_sum(alpha[i<<1] + get_a(i<<1,state),
867  alpha[(i<<1)+1] + get_a((i<<1)+1,state));
868  if (N%2==1)
869  return get_b(state, p_observations->get_feature(dimension,time))+
870  CMath::logarithmic_sum(alpha[N-1]+get_a(N-1,state),
871  CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
872  else
873  return get_b(state, p_observations->get_feature(dimension,time))+CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
874 #else //USE_LOGSUMARRAY
875  register float64_t sum=-CMath::INFTY;
876  for (i=0; i<N; i++)
877  sum= CMath::logarithmic_sum(sum, alpha[i] + get_a(i, state));
878 
879  return sum + get_b(state, p_observations->get_feature(dimension,time));
880 #endif //USE_LOGSUMARRAY
881  }
882  else
883  {
884  // termination
885  register int32_t i ;
886  float64_t sum ;
887 #ifdef USE_LOGSUMARRAY
888  for (i=0; i<(N>>1); i++)
889  ARRAYS(dimension)[i]=CMath::logarithmic_sum(alpha[i<<1] + get_q(i<<1),
890  alpha[(i<<1)+1] + get_q((i<<1)+1));
891  if (N%2==1)
892  sum=CMath::logarithmic_sum(alpha[N-1]+get_q(N-1),
893  CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
894  else
895  sum=CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
896 #else //USE_LOGSUMARRAY
897  sum=-CMath::INFTY;
898  for (i=0; i<N; i++) //sum over all paths
899  sum=CMath::logarithmic_sum(sum, alpha[i] + get_q(i)); //to get model probability
900 #endif //USE_LOGSUMARRAY
901 
902  if (!ALPHA_CACHE(dimension).table)
903  return sum;
904  else
905  {
906  ALPHA_CACHE(dimension).dimension=dimension;
907  ALPHA_CACHE(dimension).updated=true;
908  ALPHA_CACHE(dimension).sum=sum;
909 
910  if (wanted_time<p_observations->get_vector_length(dimension))
911  return ALPHA_CACHE(dimension).table[wanted_time*N+state];
912  else
913  return ALPHA_CACHE(dimension).sum;
914  }
915  }
916  }
917 }
918 
919 
920 //backward algorithm
921 //calculates Pr[O_t+1,O_t+2, ..., O_T| q_time=S_i, lambda] for 0<= time <= T-1
922 //Pr[O|lambda] for time >= T
923 float64_t CHMM::backward_comp(int32_t time, int32_t state, int32_t dimension)
924 {
925  T_ALPHA_BETA_TABLE* beta_new;
926  T_ALPHA_BETA_TABLE* beta;
927  T_ALPHA_BETA_TABLE* dummy;
928  int32_t wanted_time=time;
929 
930  if (time<0)
931  forward(time, state, dimension);
932 
933  if (BETA_CACHE(dimension).table)
934  {
935  beta=&BETA_CACHE(dimension).table[N*(p_observations->get_vector_length(dimension)-1)];
936  beta_new=&BETA_CACHE(dimension).table[N*(p_observations->get_vector_length(dimension)-2)];
937  time=-1;
938  }
939  else
940  {
941  beta_new=(T_ALPHA_BETA_TABLE*)ARRAYN1(dimension);
942  beta=(T_ALPHA_BETA_TABLE*)ARRAYN2(dimension);
943  }
944 
945  if (time>=p_observations->get_vector_length(dimension)-1)
946  // return 0;
947  // else if (time==p_observations->get_vector_length(dimension)-1)
948  return get_q(state);
949  else
950  {
951  //initialization beta_T(i)=q(i)
952  for (register int32_t i=0; i<N; i++)
953  beta[i]=get_q(i);
954 
955  //induction beta_t(i) = (sum_j=1^N a_ij*b_j(O_t+1)*beta_t+1(j)
956  for (register int32_t t=p_observations->get_vector_length(dimension)-1; t>time+1 && t>0; t--)
957  {
958  for (register int32_t i=0; i<N; i++)
959  {
960  register int32_t j, num=trans_list_backward_cnt[i] ;
961  float64_t sum=-CMath::INFTY;
962  for (j=0; j<num; j++)
963  {
964  int32_t jj = trans_list_backward[i][j] ;
965  sum= CMath::logarithmic_sum(sum, get_a(i, jj) + get_b(jj, p_observations->get_feature(dimension,t)) + beta[jj]);
966  } ;
967  beta_new[i]=sum;
968  }
969 
970  if (!BETA_CACHE(dimension).table)
971  {
972  dummy=beta;
973  beta=beta_new;
974  beta_new=dummy; //switch beta/beta_new
975  }
976  else
977  {
978  beta=beta_new;
979  beta_new-=N; //perversely pointer arithmetic
980  }
981  }
982 
983  if (time>=0)
984  {
985  register int32_t j, num=trans_list_backward_cnt[state] ;
986  float64_t sum=-CMath::INFTY;
987  for (j=0; j<num; j++)
988  {
989  int32_t jj = trans_list_backward[state][j] ;
990  sum= CMath::logarithmic_sum(sum, get_a(state, jj) + get_b(jj, p_observations->get_feature(dimension,time+1))+beta[jj]);
991  } ;
992  return sum;
993  }
994  else // time<0
995  {
996  if (BETA_CACHE(dimension).table)
997  {
998  float64_t sum=-CMath::INFTY;
999  for (register int32_t j=0; j<N; j++)
1000  sum= CMath::logarithmic_sum(sum, get_p(j) + get_b(j, p_observations->get_feature(dimension,0))+beta[j]);
1001  BETA_CACHE(dimension).sum=sum;
1002  BETA_CACHE(dimension).dimension=dimension;
1003  BETA_CACHE(dimension).updated=true;
1004 
1005  if (wanted_time<p_observations->get_vector_length(dimension))
1006  return BETA_CACHE(dimension).table[wanted_time*N+state];
1007  else
1008  return BETA_CACHE(dimension).sum;
1009  }
1010  else
1011  {
1012  float64_t sum=-CMath::INFTY; // apply LOG_SUM_ARRAY -- no cache ... does not make very sense anyway...
1013  for (register int32_t j=0; j<N; j++)
1014  sum= CMath::logarithmic_sum(sum, get_p(j) + get_b(j, p_observations->get_feature(dimension,0))+beta[j]);
1015  return sum;
1016  }
1017  }
1018  }
1019 }
1020 
1021 
// Dense/LOGSUMARRAY variant of the backward algorithm (the leading
// `float64_t CHMM::backward_comp_old(` signature line is elided in this
// rendering): sums over all N successor states rather than the sparse
// transition lists used by backward_comp(). Fills the per-dimension
// beta cache when one is available.
	int32_t time, int32_t state, int32_t dimension)
{
	T_ALPHA_BETA_TABLE* beta_new;
	T_ALPHA_BETA_TABLE* beta;
	T_ALPHA_BETA_TABLE* dummy;
	int32_t wanted_time=time;

	if (time<0)
		forward(time, state, dimension);

	if (BETA_CACHE(dimension).table)
	{
		// cached mode: walk the table backwards from the last time step
		beta=&BETA_CACHE(dimension).table[N*(p_observations->get_vector_length(dimension)-1)];
		beta_new=&BETA_CACHE(dimension).table[N*(p_observations->get_vector_length(dimension)-2)];
		time=-1;
	}
	else
	{
		// uncached mode: ping-pong between two scratch rows
		beta_new=(T_ALPHA_BETA_TABLE*)ARRAYN1(dimension);
		beta=(T_ALPHA_BETA_TABLE*)ARRAYN2(dimension);
	}

	if (time>=p_observations->get_vector_length(dimension)-1)
		//	  return 0;
		//	else if (time==p_observations->get_vector_length(dimension)-1)
		return get_q(state);
	else
	{
		//initialization beta_T(i)=q(i)
		for (register int32_t i=0; i<N; i++)
			beta[i]=get_q(i);

		//induction beta_t(i) = (sum_j=1^N a_ij*b_j(O_t+1)*beta_t+1(j)
		for (register int32_t t=p_observations->get_vector_length(dimension)-1; t>time+1 && t>0; t--)
		{
			for (register int32_t i=0; i<N; i++)
			{
				register int32_t j ;
#ifdef USE_LOGSUMARRAY
				// pairwise reduction into ARRAYS(dimension), odd N handled
				// by folding in the last element separately
				for (j=0; j<(N>>1); j++)
					ARRAYS(dimension)[j]=CMath::logarithmic_sum(
							get_a(i, j<<1) + get_b(j<<1, p_observations->get_feature(dimension,t)) + beta[j<<1],
							get_a(i, (j<<1)+1) + get_b((j<<1)+1, p_observations->get_feature(dimension,t)) + beta[(j<<1)+1]);
				if (N%2==1)
					beta_new[i]=CMath::logarithmic_sum(get_a(i, N-1) + get_b(N-1, p_observations->get_feature(dimension,t)) + beta[N-1],
							CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
				else
					beta_new[i]=CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
#else //USE_LOGSUMARRAY
				float64_t sum=-CMath::INFTY;
				for (j=0; j<N; j++)
					sum= CMath::logarithmic_sum(sum, get_a(i, j) + get_b(j, p_observations->get_feature(dimension,t)) + beta[j]);

				beta_new[i]=sum;
#endif //USE_LOGSUMARRAY
			}

			if (!BETA_CACHE(dimension).table)
			{
				dummy=beta;
				beta=beta_new;
				beta_new=dummy; //switch beta/beta_new
			}
			else
			{
				beta=beta_new;
				beta_new-=N; //perversely pointer arithmetic
			}
		}

		if (time>=0)
		{
			register int32_t j ;
#ifdef USE_LOGSUMARRAY
			for (j=0; j<(N>>1); j++)
				ARRAYS(dimension)[j]=CMath::logarithmic_sum(
						get_a(state, j<<1) + get_b(j<<1, p_observations->get_feature(dimension,time+1)) + beta[j<<1],
						get_a(state, (j<<1)+1) + get_b((j<<1)+1, p_observations->get_feature(dimension,time+1)) + beta[(j<<1)+1]);
			if (N%2==1)
				return CMath::logarithmic_sum(get_a(state, N-1) + get_b(N-1, p_observations->get_feature(dimension,time+1)) + beta[N-1],
						CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
			else
				return CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
#else //USE_LOGSUMARRAY
			float64_t sum=-CMath::INFTY;
			for (j=0; j<N; j++)
				sum= CMath::logarithmic_sum(sum, get_a(state, j) + get_b(j, p_observations->get_feature(dimension,time+1))+beta[j]);

			return sum;
#endif //USE_LOGSUMARRAY
		}
		else // time<0
		{
			if (BETA_CACHE(dimension).table)
			{
#ifdef USE_LOGSUMARRAY//AAA
				for (int32_t j=0; j<(N>>1); j++)
					ARRAYS(dimension)[j]=CMath::logarithmic_sum(get_p(j<<1) + get_b(j<<1, p_observations->get_feature(dimension,0))+beta[j<<1],
							get_p((j<<1)+1) + get_b((j<<1)+1, p_observations->get_feature(dimension,0))+beta[(j<<1)+1]) ;
				if (N%2==1)
					BETA_CACHE(dimension).sum=CMath::logarithmic_sum(get_p(N-1) + get_b(N-1, p_observations->get_feature(dimension,0))+beta[N-1],
							CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1)) ;
				else
					BETA_CACHE(dimension).sum=CMath::logarithmic_sum_array(ARRAYS(dimension), N>>1) ;
#else //USE_LOGSUMARRAY
				float64_t sum=-CMath::INFTY;
				for (register int32_t j=0; j<N; j++)
					sum= CMath::logarithmic_sum(sum, get_p(j) + get_b(j, p_observations->get_feature(dimension,0))+beta[j]);
				BETA_CACHE(dimension).sum=sum;
#endif //USE_LOGSUMARRAY
				BETA_CACHE(dimension).dimension=dimension;
				BETA_CACHE(dimension).updated=true;

				if (wanted_time<p_observations->get_vector_length(dimension))
					return BETA_CACHE(dimension).table[wanted_time*N+state];
				else
					return BETA_CACHE(dimension).sum;
			}
			else
			{
				float64_t sum=-CMath::INFTY; // apply LOG_SUM_ARRAY -- no cache ... does not make very sense anyway...
				for (register int32_t j=0; j<N; j++)
					sum= CMath::logarithmic_sum(sum, get_p(j) + get_b(j, p_observations->get_feature(dimension,0))+beta[j]);
				return sum;
			}
		}
	}
}
1151 
1152 //calculates probability of best path through the model lambda AND path itself
1153 //using viterbi algorithm
// Viterbi decoding: computes the log-probability of the single best state
// sequence through the model for observation vector `dimension` and stores
// that sequence in PATH(dimension) via psi backtracking.
// dimension==-1 averages best_path() over all observation vectors (cached
// in all_pat_prob). Returns -1 when no observations are attached, the psi
// table is missing, or `dimension` is out of range.
1154 float64_t CHMM::best_path(int32_t dimension)
1155 {
1156  if (!p_observations)
1157  return -1;
1158 
1159  if (dimension==-1)
1160  {
1161  if (!all_path_prob_updated)
1162  {
1163  SG_INFO("computing full viterbi likelihood\n")
1164  float64_t sum = 0 ;
1165  for (int32_t i=0; i<p_observations->get_num_vectors(); i++)
1166  sum+=best_path(i) ;
1167  sum /= p_observations->get_num_vectors() ;
1168  all_pat_prob=sum ;
1169  all_path_prob_updated=true ;
1170  return sum ;
1171  } else
1172  return all_pat_prob ;
1173  } ;
1174 
1175  if (!STATES_PER_OBSERVATION_PSI(dimension))
1176  return -1 ;
1177 
1178  if (dimension >= p_observations->get_num_vectors())
1179  return -1;
1180 
1181  if (PATH_PROB_UPDATED(dimension) && dimension==PATH_PROB_DIMENSION(dimension))
1182  return pat_prob;
1183  else
1184  {
 // delta/delta_new form a double buffer: delta holds scores for time t-1,
 // delta_new receives scores for time t, then the pointers are swapped.
1185  register float64_t* delta= ARRAYN2(dimension);
1186  register float64_t* delta_new= ARRAYN1(dimension);
1187 
1188  { //initialization
1189  for (register int32_t i=0; i<N; i++)
1190  {
1191  delta[i]=get_p(i)+get_b(i, p_observations->get_feature(dimension,0));
1192  set_psi(0, i, 0, dimension);
1193  }
1194  }
1195 
1196 #ifdef USE_PATHDEBUG
1197  float64_t worst=-CMath::INFTY/4 ;
1198 #endif
1199  //recursion
1200  for (register int32_t t=1; t<p_observations->get_vector_length(dimension); t++)
1201  {
1202  register float64_t* dummy;
1203  register int32_t NN=N ;
1204  for (register int32_t j=0; j<NN; j++)
1205  {
 // row j of the transposed transition access: matrix_a[i] = a(i -> j);
 // find the predecessor state i maximizing delta[i] + a(i,j)
1206  register float64_t * matrix_a=&transition_matrix_a[j*N] ; // sorry for that
1207  register float64_t maxj=delta[0] + matrix_a[0];
1208  register int32_t argmax=0;
1209 
1210  for (register int32_t i=1; i<NN; i++)
1211  {
1212  register float64_t temp = delta[i] + matrix_a[i];
1213 
1214  if (temp>maxj)
1215  {
1216  maxj=temp;
1217  argmax=i;
1218  }
1219  }
1220 #ifdef FIX_POS
1221  if ((!model) || (model->get_fix_pos_state(t,j,NN)!=Model::FIX_DISALLOWED))
1222 #endif
1223  delta_new[j]=maxj + get_b(j,p_observations->get_feature(dimension,t));
1224 #ifdef FIX_POS
1225  else
1226  delta_new[j]=maxj + get_b(j,p_observations->get_feature(dimension,t)) + Model::DISALLOWED_PENALTY;
1227 #endif
 // remember the best predecessor for backtracking
1228  set_psi(t, j, argmax, dimension);
1229  }
1230 
1231 #ifdef USE_PATHDEBUG
1232  float64_t best=log(0) ;
1233  for (int32_t jj=0; jj<N; jj++)
1234  if (delta_new[jj]>best)
1235  best=delta_new[jj] ;
1236 
1237  if (best<-CMath::INFTY/2)
1238  {
1239  SG_DEBUG("worst case at %i: %e:%e\n", t, best, worst)
1240  worst=best ;
1241  } ;
1242 #endif
1243 
1244  dummy=delta;
1245  delta=delta_new;
1246  delta_new=dummy; //switch delta/delta_new
1247  }
1248 
1249 
1250  { //termination: fold in the end-state distribution q and pick the best final state
1251  register float64_t maxj=delta[0]+get_q(0);
1252  register int32_t argmax=0;
1253 
1254  for (register int32_t i=1; i<N; i++)
1255  {
1256  register float64_t temp=delta[i]+get_q(i);
1257 
1258  if (temp>maxj)
1259  {
1260  maxj=temp;
1261  argmax=i;
1262  }
1263  }
1264  pat_prob=maxj;
1265  PATH(dimension)[p_observations->get_vector_length(dimension)-1]=argmax;
1266  } ;
1267 
1268 
1269  { //state sequence backtracking
1270  for (register int32_t t=p_observations->get_vector_length(dimension)-1; t>0; t--)
1271  {
1272  PATH(dimension)[t-1]=get_psi(t, PATH(dimension)[t], dimension);
1273  }
1274  }
1275  PATH_PROB_UPDATED(dimension)=true;
1276  PATH_PROB_DIMENSION(dimension)=dimension;
1277  return pat_prob ;
1278  }
1279 }
1280 
1281 #ifndef USE_HMMPARALLEL
1283 {
1284  //for faster calculation cache model probability
1285  mod_prob=0 ;
1286  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++) //sum in log space
1288 
1289  mod_prob_updated=true;
1290  return mod_prob;
1291 }
1292 
1293 #else
1294 
1296 {
1297  pthread_t *threads=SG_MALLOC(pthread_t, parallel->get_num_threads());
1298  S_BW_THREAD_PARAM *params=SG_MALLOC(S_BW_THREAD_PARAM, parallel->get_num_threads());
1299 
1300  SG_INFO("computing full model probablity\n")
1301  mod_prob=0;
1302 
1303  for (int32_t cpu=0; cpu<parallel->get_num_threads(); cpu++)
1304  {
1305  params[cpu].hmm=this ;
1306  params[cpu].dim_start= p_observations->get_num_vectors()*cpu/parallel->get_num_threads();
1307  params[cpu].dim_stop= p_observations->get_num_vectors()*(cpu+1)/parallel->get_num_threads();
1308  params[cpu].p_buf=SG_MALLOC(float64_t, N);
1309  params[cpu].q_buf=SG_MALLOC(float64_t, N);
1310  params[cpu].a_buf=SG_MALLOC(float64_t, N*N);
1311  params[cpu].b_buf=SG_MALLOC(float64_t, N*M);
1312  pthread_create(&threads[cpu], NULL, bw_dim_prefetch, (void*)&params[cpu]);
1313  }
1314 
1315  for (int32_t cpu=0; cpu<parallel->get_num_threads(); cpu++)
1316  {
1317  pthread_join(threads[cpu], NULL);
1318  mod_prob+=params[cpu].ret;
1319  }
1320 
1321  for (int32_t i=0; i<parallel->get_num_threads(); i++)
1322  {
1323  SG_FREE(params[i].p_buf);
1324  SG_FREE(params[i].q_buf);
1325  SG_FREE(params[i].a_buf);
1326  SG_FREE(params[i].b_buf);
1327  }
1328 
1329  SG_FREE(threads);
1330  SG_FREE(params);
1331 
1332  mod_prob_updated=true;
1333  return mod_prob;
1334 }
1335 
1336 void* CHMM::bw_dim_prefetch(void* params)
1337 {
1338  CHMM* hmm=((S_BW_THREAD_PARAM*) params)->hmm;
1339  int32_t start=((S_BW_THREAD_PARAM*) params)->dim_start;
1340  int32_t stop=((S_BW_THREAD_PARAM*) params)->dim_stop;
1341  float64_t* p_buf=((S_BW_THREAD_PARAM*) params)->p_buf;
1342  float64_t* q_buf=((S_BW_THREAD_PARAM*) params)->q_buf;
1343  float64_t* a_buf=((S_BW_THREAD_PARAM*) params)->a_buf;
1344  float64_t* b_buf=((S_BW_THREAD_PARAM*) params)->b_buf;
1345  ((S_BW_THREAD_PARAM*)params)->ret=0;
1346 
1347  for (int32_t dim=start; dim<stop; dim++)
1348  {
1349  hmm->forward_comp(hmm->p_observations->get_vector_length(dim), hmm->N-1, dim) ;
1350  hmm->backward_comp(hmm->p_observations->get_vector_length(dim), hmm->N-1, dim) ;
1351  float64_t modprob=hmm->model_probability(dim) ;
1352  hmm->ab_buf_comp(p_buf, q_buf, a_buf, b_buf, dim) ;
1353  ((S_BW_THREAD_PARAM*)params)->ret+= modprob;
1354  }
1355  return NULL ;
1356 }
1357 
1358 void* CHMM::bw_single_dim_prefetch(void * params)
1359 {
1360  CHMM* hmm=((S_BW_THREAD_PARAM*)params)->hmm ;
1361  int32_t dim=((S_DIM_THREAD_PARAM*)params)->dim ;
1362  ((S_DIM_THREAD_PARAM*)params)->prob_sum = hmm->model_probability(dim);
1363  return NULL ;
1364 }
1365 
1366 void* CHMM::vit_dim_prefetch(void * params)
1367 {
1368  CHMM* hmm=((S_DIM_THREAD_PARAM*)params)->hmm ;
1369  int32_t dim=((S_DIM_THREAD_PARAM*)params)->dim ;
1370  ((S_DIM_THREAD_PARAM*)params)->prob_sum = hmm->best_path(dim);
1371  return NULL ;
1372 }
1373 
1374 #endif //USE_HMMPARALLEL
1375 
1376 
1377 #ifdef USE_HMMPARALLEL
1378 
1379 void CHMM::ab_buf_comp(
1380  float64_t* p_buf, float64_t* q_buf, float64_t *a_buf, float64_t* b_buf,
1381  int32_t dim)
1382 {
1383  int32_t i,j,t ;
1384  float64_t a_sum;
1385  float64_t b_sum;
1386 
1387  float64_t dimmodprob=model_probability(dim);
1388 
1389  for (i=0; i<N; i++)
1390  {
1391  //estimate initial+end state distribution numerator
1392  p_buf[i]=get_p(i)+get_b(i,p_observations->get_feature(dim,0))+backward(0,i,dim) - dimmodprob;
1393  q_buf[i]=forward(p_observations->get_vector_length(dim)-1, i, dim)+get_q(i) - dimmodprob;
1394 
1395  //estimate a
1396  for (j=0; j<N; j++)
1397  {
1398  a_sum=-CMath::INFTY;
1399 
1400  for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
1401  {
1402  a_sum= CMath::logarithmic_sum(a_sum, forward(t,i,dim)+
1403  get_a(i,j)+get_b(j,p_observations->get_feature(dim,t+1))+backward(t+1,j,dim));
1404  }
1405  a_buf[N*i+j]=a_sum-dimmodprob ;
1406  }
1407 
1408  //estimate b
1409  for (j=0; j<M; j++)
1410  {
1411  b_sum=-CMath::INFTY;
1412 
1413  for (t=0; t<p_observations->get_vector_length(dim); t++)
1414  {
1415  if (p_observations->get_feature(dim,t)==j)
1416  b_sum=CMath::logarithmic_sum(b_sum, forward(t,i,dim)+backward(t, i, dim));
1417  }
1418 
1419  b_buf[M*i+j]=b_sum-dimmodprob ;
1420  }
1421  }
1422 }
1423 
1424 //estimates new model lambda out of lambda_train using baum welch algorithm
// Parallel Baum-Welch re-estimation: initializes this model's p,q,a,b as
// log-space numerator accumulators (pseudocount-seeded), farms the
// per-vector E-step out to bw_dim_prefetch() worker threads, merges their
// numerator buffers, then normalize() turns numerators into probabilities.
// NOTE(review): the extracted listing is missing this function's signature
// line; from the body it takes the trained model as parameter `hmm` --
// presumably void CHMM::estimate_model_baum_welch(CHMM* hmm). Verify
// against HMM.h.
1426 {
1427  int32_t i,j,cpu;
1428  float64_t fullmodprob=0; //for all dims
1429 
1430  //clear actual model a,b,p,q are used as numerator
1431  for (i=0; i<N; i++)
1432  {
1433  if (hmm->get_p(i)>CMath::ALMOST_NEG_INFTY)
1434  set_p(i,log(PSEUDO));
1435  else
1436  set_p(i,hmm->get_p(i));
1437  if (hmm->get_q(i)>CMath::ALMOST_NEG_INFTY)
1438  set_q(i,log(PSEUDO));
1439  else
1440  set_q(i,hmm->get_q(i));
1441 
1442  for (j=0; j<N; j++)
1443  if (hmm->get_a(i,j)>CMath::ALMOST_NEG_INFTY)
1444  set_a(i,j, log(PSEUDO));
1445  else
1446  set_a(i,j,hmm->get_a(i,j));
1447  for (j=0; j<M; j++)
1448  if (hmm->get_b(i,j)>CMath::ALMOST_NEG_INFTY)
1449  set_b(i,j, log(PSEUDO));
1450  else
1451  set_b(i,j,hmm->get_b(i,j));
1452  }
1453  invalidate_model();
1454 
1455  int32_t num_threads = parallel->get_num_threads();
1456 
1457  pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
1458  S_BW_THREAD_PARAM *params=SG_MALLOC(S_BW_THREAD_PARAM, num_threads);
1459 
1460  if (p_observations->get_num_vectors()<num_threads)
1461  num_threads=p_observations->get_num_vectors();
1462 
1463  for (cpu=0; cpu<num_threads; cpu++)
1464  {
1465  params[cpu].p_buf=SG_MALLOC(float64_t, N);
1466  params[cpu].q_buf=SG_MALLOC(float64_t, N);
1467  params[cpu].a_buf=SG_MALLOC(float64_t, N*N);
1468  params[cpu].b_buf=SG_MALLOC(float64_t, N*M);
1469 
1470  params[cpu].hmm=hmm;
1471  int32_t start = p_observations->get_num_vectors()*cpu / num_threads;
1472  int32_t stop=p_observations->get_num_vectors()*(cpu+1) / num_threads;
1473 
1474  if (cpu == parallel->get_num_threads()-1)
 // NOTE(review): the statement that was here (listing line 1475) was
 // dropped by the extraction; presumably it extends the last thread's
 // range to cover all remaining vectors, e.g.
 // stop=p_observations->get_num_vectors(); -- verify against original.
1476 
1477  ASSERT(start<stop)
1478  params[cpu].dim_start=start;
1479  params[cpu].dim_stop=stop;
1480 
1481  pthread_create(&threads[cpu], NULL, bw_dim_prefetch, &params[cpu]);
1482  }
1483 
 // merge each worker's log-space numerator buffers into this model
1484  for (cpu=0; cpu<num_threads; cpu++)
1485  {
1486  pthread_join(threads[cpu], NULL);
1487 
1488  for (i=0; i<N; i++)
1489  {
1490  //estimate initial+end state distribution numerator
1491  set_p(i, CMath::logarithmic_sum(get_p(i), params[cpu].p_buf[i]));
1492  set_q(i, CMath::logarithmic_sum(get_q(i), params[cpu].q_buf[i]));
1493 
1494  //estimate numerator for a
1495  for (j=0; j<N; j++)
1496  set_a(i,j, CMath::logarithmic_sum(get_a(i,j), params[cpu].a_buf[N*i+j]));
1497 
1498  //estimate numerator for b
1499  for (j=0; j<M; j++)
1500  set_b(i,j, CMath::logarithmic_sum(get_b(i,j), params[cpu].b_buf[M*i+j]));
1501  }
1502 
1503  fullmodprob+=params[cpu].ret;
1504 
1505  }
1506 
1507  for (cpu=0; cpu<num_threads; cpu++)
1508  {
1509  SG_FREE(params[cpu].p_buf);
1510  SG_FREE(params[cpu].q_buf);
1511  SG_FREE(params[cpu].a_buf);
1512  SG_FREE(params[cpu].b_buf);
1513  }
1514 
1515  SG_FREE(threads);
1516  SG_FREE(params);
1517 
1518  //cache hmm model probability
1519  hmm->mod_prob=fullmodprob;
1520  hmm->mod_prob_updated=true ;
1521 
1522  //new model probability is unknown
1523  normalize();
1524  invalidate_model();
1525 }
1526 
1527 #else // USE_HMMPARALLEL
1528 
1529 //estimates new model lambda out of lambda_estimate using baum welch algorithm
// Baum-Welch re-estimation (non-parallel build) that restricts the
// transition update to states reachable per the backward transition list
// (trans_list_backward), avoiding the full NxN sweep of the plain variant
// below. p,q,a,b of this model serve as log-space numerator accumulators.
// NOTE(review): the extracted listing is missing this function's signature
// line; from the body it takes the trained model as parameter `estimate`
// -- presumably void CHMM::estimate_model_baum_welch(CHMM* estimate).
// Verify against HMM.h.
1531 {
1532  int32_t i,j,t,dim;
1533  float64_t a_sum, b_sum; //numerator
1534  float64_t dimmodprob=0; //model probability for dim
1535  float64_t fullmodprob=0; //for all dims
1536 
1537  //clear actual model a,b,p,q are used as numerator
 // entries at (log-)zero stay pinned so structurally-forbidden
 // transitions/emissions are never revived by the pseudocount
1538  for (i=0; i<N; i++)
1539  {
1540  if (estimate->get_p(i)>CMath::ALMOST_NEG_INFTY)
1541  set_p(i,log(PSEUDO));
1542  else
1543  set_p(i,estimate->get_p(i));
1544  if (estimate->get_q(i)>CMath::ALMOST_NEG_INFTY)
1545  set_q(i,log(PSEUDO));
1546  else
1547  set_q(i,estimate->get_q(i));
1548 
1549  for (j=0; j<N; j++)
1550  if (estimate->get_a(i,j)>CMath::ALMOST_NEG_INFTY)
1551  set_a(i,j, log(PSEUDO));
1552  else
1553  set_a(i,j,estimate->get_a(i,j));
1554  for (j=0; j<M; j++)
1555  if (estimate->get_b(i,j)>CMath::ALMOST_NEG_INFTY)
1556  set_b(i,j, log(PSEUDO));
1557  else
1558  set_b(i,j,estimate->get_b(i,j));
1559  }
1560  invalidate_model();
1561 
1562  //change summation order to make use of alpha/beta caches
1563  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
1564  {
1565  dimmodprob=estimate->model_probability(dim);
1566  fullmodprob+=dimmodprob ;
1567 
1568  for (i=0; i<N; i++)
1569  {
1570  //estimate initial+end state distribution numerator
1571  set_p(i, CMath::logarithmic_sum(get_p(i), estimate->get_p(i)+estimate->get_b(i,p_observations->get_feature(dim,0))+estimate->backward(0,i,dim) - dimmodprob));
1572  set_q(i, CMath::logarithmic_sum(get_q(i), estimate->forward(p_observations->get_vector_length(dim)-1, i, dim)+estimate->get_q(i) - dimmodprob ));
1573 
1574  int32_t num = trans_list_backward_cnt[i] ;
1575 
1576  //estimate a
 // only iterate over the transitions listed for state i
1577  for (j=0; j<num; j++)
1578  {
1579  int32_t jj = trans_list_backward[i][j] ;
1580  a_sum=-CMath::INFTY;
1581 
1582  for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
1583  {
1584  a_sum= CMath::logarithmic_sum(a_sum, estimate->forward(t,i,dim)+
1585  estimate->get_a(i,jj)+estimate->get_b(jj,p_observations->get_feature(dim,t+1))+estimate->backward(t+1,jj,dim));
1586  }
1587  set_a(i,jj, CMath::logarithmic_sum(get_a(i,jj), a_sum-dimmodprob));
1588  }
1589 
1590  //estimate b
1591  for (j=0; j<M; j++)
1592  {
1593  b_sum=-CMath::INFTY;
1594 
1595  for (t=0; t<p_observations->get_vector_length(dim); t++)
1596  {
1597  if (p_observations->get_feature(dim,t)==j)
1598  b_sum=CMath::logarithmic_sum(b_sum, estimate->forward(t,i,dim)+estimate->backward(t, i, dim));
1599  }
1600 
1601  set_b(i,j, CMath::logarithmic_sum(get_b(i,j), b_sum-dimmodprob));
1602  }
1603  }
1604  }
1605 
1606  //cache estimate model probability
1607  estimate->mod_prob=fullmodprob;
1608  estimate->mod_prob_updated=true ;
1609 
1610  //new model probability is unknown
1611  normalize();
1612  invalidate_model();
1613 }
1614 
1615 //estimates new model lambda out of lambda_estimate using baum welch algorithm
// Plain Baum-Welch re-estimation (non-parallel build): identical to the
// variant above but sweeps the full NxN transition matrix instead of the
// backward transition list. This model's p,q,a,b act as log-space
// numerator accumulators seeded with log(PSEUDO).
// NOTE(review): the extracted listing is missing this function's signature
// line; from the body it takes the trained model as parameter `estimate`.
// Verify the exact name/signature against HMM.h.
1617 {
1618  int32_t i,j,t,dim;
1619  float64_t a_sum, b_sum; //numerator
1620  float64_t dimmodprob=0; //model probability for dim
1621  float64_t fullmodprob=0; //for all dims
1622 
1623  //clear actual model a,b,p,q are used as numerator
1624  for (i=0; i<N; i++)
1625  {
1626  if (estimate->get_p(i)>CMath::ALMOST_NEG_INFTY)
1627  set_p(i,log(PSEUDO));
1628  else
1629  set_p(i,estimate->get_p(i));
1630  if (estimate->get_q(i)>CMath::ALMOST_NEG_INFTY)
1631  set_q(i,log(PSEUDO));
1632  else
1633  set_q(i,estimate->get_q(i));
1634 
1635  for (j=0; j<N; j++)
1636  if (estimate->get_a(i,j)>CMath::ALMOST_NEG_INFTY)
1637  set_a(i,j, log(PSEUDO));
1638  else
1639  set_a(i,j,estimate->get_a(i,j));
1640  for (j=0; j<M; j++)
1641  if (estimate->get_b(i,j)>CMath::ALMOST_NEG_INFTY)
1642  set_b(i,j, log(PSEUDO));
1643  else
1644  set_b(i,j,estimate->get_b(i,j));
1645  }
1646  invalidate_model();
1647 
1648  //change summation order to make use of alpha/beta caches
1649  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
1650  {
1651  dimmodprob=estimate->model_probability(dim);
1652  fullmodprob+=dimmodprob ;
1653 
1654  for (i=0; i<N; i++)
1655  {
1656  //estimate initial+end state distribution numerator
1657  set_p(i, CMath::logarithmic_sum(get_p(i), estimate->get_p(i)+estimate->get_b(i,p_observations->get_feature(dim,0))+estimate->backward(0,i,dim) - dimmodprob));
1658  set_q(i, CMath::logarithmic_sum(get_q(i), estimate->forward(p_observations->get_vector_length(dim)-1, i, dim)+estimate->get_q(i) - dimmodprob ));
1659 
1660  //estimate a
1661  for (j=0; j<N; j++)
1662  {
1663  a_sum=-CMath::INFTY;
1664 
1665  for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
1666  {
1667  a_sum= CMath::logarithmic_sum(a_sum, estimate->forward(t,i,dim)+
1668  estimate->get_a(i,j)+estimate->get_b(j,p_observations->get_feature(dim,t+1))+estimate->backward(t+1,j,dim));
1669  }
1670  set_a(i,j, CMath::logarithmic_sum(get_a(i,j), a_sum-dimmodprob));
1671  }
1672 
1673  //estimate b
1674  for (j=0; j<M; j++)
1675  {
1676  b_sum=-CMath::INFTY;
1677 
1678  for (t=0; t<p_observations->get_vector_length(dim); t++)
1679  {
1680  if (p_observations->get_feature(dim,t)==j)
1681  b_sum=CMath::logarithmic_sum(b_sum, estimate->forward(t,i,dim)+estimate->backward(t, i, dim));
1682  }
1683 
1684  set_b(i,j, CMath::logarithmic_sum(get_b(i,j), b_sum-dimmodprob));
1685  }
1686  }
1687  }
1688 
1689  //cache estimate model probability
1690  estimate->mod_prob=fullmodprob;
1691  estimate->mod_prob_updated=true ;
1692 
1693  //new model probability is unknown
1694  normalize();
1695  invalidate_model();
1696 }
1697 #endif // USE_HMMPARALLEL
1698 
1699 //estimates new model lambda out of lambda_estimate using baum welch algorithm
1700 // optimize only p, q, a but not b
// Baum-Welch re-estimation restricted to p, q and a: emission
// probabilities b are copied unchanged from `estimate`, only the start,
// end and transition distributions are re-estimated (again via the
// backward transition list).
// NOTE(review): the extracted listing is missing this function's signature
// line; from the body it takes the trained model as parameter `estimate`
// -- presumably void CHMM::estimate_model_baum_welch_trans(CHMM*
// estimate). Verify against HMM.h.
1702 {
1703  int32_t i,j,t,dim;
1704  float64_t a_sum; //numerator
1705  float64_t dimmodprob=0; //model probability for dim
1706  float64_t fullmodprob=0; //for all dims
1707 
1708  //clear actual model a,b,p,q are used as numerator
1709  for (i=0; i<N; i++)
1710  {
1711  if (estimate->get_p(i)>CMath::ALMOST_NEG_INFTY)
1712  set_p(i,log(PSEUDO));
1713  else
1714  set_p(i,estimate->get_p(i));
1715  if (estimate->get_q(i)>CMath::ALMOST_NEG_INFTY)
1716  set_q(i,log(PSEUDO));
1717  else
1718  set_q(i,estimate->get_q(i));
1719 
1720  for (j=0; j<N; j++)
1721  if (estimate->get_a(i,j)>CMath::ALMOST_NEG_INFTY)
1722  set_a(i,j, log(PSEUDO));
1723  else
1724  set_a(i,j,estimate->get_a(i,j));
 // b is NOT re-estimated here -- copied verbatim from estimate
1725  for (j=0; j<M; j++)
1726  set_b(i,j,estimate->get_b(i,j));
1727  }
1728  invalidate_model();
1729 
1730  //change summation order to make use of alpha/beta caches
1731  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
1732  {
1733  dimmodprob=estimate->model_probability(dim);
1734  fullmodprob+=dimmodprob ;
1735 
1736  for (i=0; i<N; i++)
1737  {
1738  //estimate initial+end state distribution numerator
1739  set_p(i, CMath::logarithmic_sum(get_p(i), estimate->get_p(i)+estimate->get_b(i,p_observations->get_feature(dim,0))+estimate->backward(0,i,dim) - dimmodprob));
1740  set_q(i, CMath::logarithmic_sum(get_q(i), estimate->forward(p_observations->get_vector_length(dim)-1, i, dim)+estimate->get_q(i) - dimmodprob ));
1741 
1742  int32_t num = trans_list_backward_cnt[i] ;
1743  //estimate a
1744  for (j=0; j<num; j++)
1745  {
1746  int32_t jj = trans_list_backward[i][j] ;
1747  a_sum=-CMath::INFTY;
1748 
1749  for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
1750  {
1751  a_sum= CMath::logarithmic_sum(a_sum, estimate->forward(t,i,dim)+
1752  estimate->get_a(i,jj)+estimate->get_b(jj,p_observations->get_feature(dim,t+1))+estimate->backward(t+1,jj,dim));
1753  }
1754  set_a(i,jj, CMath::logarithmic_sum(get_a(i,jj), a_sum-dimmodprob));
1755  }
1756  }
1757  }
1758 
1759  //cache estimate model probability
1760  estimate->mod_prob=fullmodprob;
1761  estimate->mod_prob_updated=true ;
1762 
1763  //new model probability is unknown
1764  normalize();
1765  invalidate_model();
1766 }
1767 
1768 
1769 
1770 //estimates new model lambda out of lambda_estimate using baum welch algorithm
// Baum-Welch re-estimation restricted to the parameters listed in the
// Model object (model->get_learn_p/q/a/b): only those entries are
// re-estimated; everything else keeps its current value. Numerators go
// into this model's p,q,a,b; per-state denominators go into the scratch
// arrays A (transitions) and B (emissions).
// NOTE(review): the extracted listing is missing this function's signature
// line; from the body it takes the trained model as parameter `estimate`
// -- presumably void CHMM::estimate_model_baum_welch_defined(CHMM*
// estimate). Verify against HMM.h.
1772 {
1773  int32_t i,j,old_i,k,t,dim;
1774  float64_t a_sum_num, b_sum_num; //numerator
1775  float64_t a_sum_denom, b_sum_denom; //denominator
1776  float64_t dimmodprob=-CMath::INFTY; //model probability for dim
1777  float64_t fullmodprob=0; //for all dims
1778  float64_t* A=ARRAYN1(0);
1779  float64_t* B=ARRAYN2(0);
1780 
1781  //clear actual model a,b,p,q are used as numerator
1782  //A,B as denominator for a,b
1783  for (k=0; (i=model->get_learn_p(k))!=-1; k++)
1784  set_p(i,log(PSEUDO));
1785 
1786  for (k=0; (i=model->get_learn_q(k))!=-1; k++)
1787  set_q(i,log(PSEUDO));
1788 
1789  for (k=0; (i=model->get_learn_a(k,0))!=-1; k++)
1790  {
1791  j=model->get_learn_a(k,1);
1792  set_a(i,j, log(PSEUDO));
1793  }
1794 
1795  for (k=0; (i=model->get_learn_b(k,0))!=-1; k++)
1796  {
1797  j=model->get_learn_b(k,1);
1798  set_b(i,j, log(PSEUDO));
1799  }
1800 
1801  for (i=0; i<N; i++)
1802  {
1803  A[i]=log(PSEUDO);
1804  B[i]=log(PSEUDO);
1805  }
1806 
1807 #ifdef USE_HMMPARALLEL
1808  int32_t num_threads = parallel->get_num_threads();
1809  pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
1810  S_DIM_THREAD_PARAM *params=SG_MALLOC(S_DIM_THREAD_PARAM, num_threads);
1811 
1812  if (p_observations->get_num_vectors()<num_threads)
1813  num_threads=p_observations->get_num_vectors();
1814 #endif
1815 
1816  //change summation order to make use of alpha/beta caches
1817  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
1818  {
 // parallel build: prefetch model probabilities for the next
 // num_threads vectors in a batch of worker threads
1819 #ifdef USE_HMMPARALLEL
1820  if (dim%num_threads==0)
1821  {
1822  for (i=0; i<num_threads; i++)
1823  {
1824  if (dim+i<p_observations->get_num_vectors())
1825  {
1826  params[i].hmm=estimate ;
1827  params[i].dim=dim+i ;
1828  pthread_create(&threads[i], NULL, bw_single_dim_prefetch, (void*)&params[i]) ;
1829  }
1830  }
1831  for (i=0; i<num_threads; i++)
1832  {
1833  if (dim+i<p_observations->get_num_vectors())
1834  {
1835  pthread_join(threads[i], NULL);
1836  dimmodprob = params[i].prob_sum;
1837  }
1838  }
1839  }
1840 #else
1841  dimmodprob=estimate->model_probability(dim);
1842 #endif // USE_HMMPARALLEL
1843 
1844  //and denominator
1845  fullmodprob+= dimmodprob;
1846 
1847  //estimate initial+end state distribution numerator
1848  for (k=0; (i=model->get_learn_p(k))!=-1; k++)
1849  set_p(i, CMath::logarithmic_sum(get_p(i), estimate->forward(0,i,dim)+estimate->backward(0,i,dim) - dimmodprob ) );
1850 
1851  for (k=0; (i=model->get_learn_q(k))!=-1; k++)
1852  set_q(i, CMath::logarithmic_sum(get_q(i), estimate->forward(p_observations->get_vector_length(dim)-1, i, dim)+
1853  estimate->backward(p_observations->get_vector_length(dim)-1, i, dim) - dimmodprob ) );
1854 
1855  //estimate a
1856  old_i=-1;
1857  for (k=0; (i=model->get_learn_a(k,0))!=-1; k++)
1858  {
1859  //denominator is constant for j
1860  //therefore calculate it first
 // learn_a entries are grouped by source state i, so the denominator
 // is only recomputed when i changes
1861  if (old_i!=i)
1862  {
1863  old_i=i;
1864  a_sum_denom=-CMath::INFTY;
1865 
1866  for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
1867  a_sum_denom= CMath::logarithmic_sum(a_sum_denom, estimate->forward(t,i,dim)+estimate->backward(t,i,dim));
1868 
1869  A[i]= CMath::logarithmic_sum(A[i], a_sum_denom-dimmodprob);
1870  }
1871 
1872  j=model->get_learn_a(k,1);
1873  a_sum_num=-CMath::INFTY;
1874  for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
1875  {
1876  a_sum_num= CMath::logarithmic_sum(a_sum_num, estimate->forward(t,i,dim)+
1877  estimate->get_a(i,j)+estimate->get_b(j,p_observations->get_feature(dim,t+1))+estimate->backward(t+1,j,dim));
1878  }
1879 
1880  set_a(i,j, CMath::logarithmic_sum(get_a(i,j), a_sum_num-dimmodprob));
1881  }
1882 
1883  //estimate b
1884  old_i=-1;
1885  for (k=0; (i=model->get_learn_b(k,0))!=-1; k++)
1886  {
1887 
1888  //denominator is constant for j
1889  //therefore calculate it first
1890  if (old_i!=i)
1891  {
1892  old_i=i;
1893  b_sum_denom=-CMath::INFTY;
1894 
1895  for (t=0; t<p_observations->get_vector_length(dim); t++)
1896  b_sum_denom= CMath::logarithmic_sum(b_sum_denom, estimate->forward(t,i,dim)+estimate->backward(t,i,dim));
1897 
1898  B[i]= CMath::logarithmic_sum(B[i], b_sum_denom-dimmodprob);
1899  }
1900 
1901  j=model->get_learn_b(k,1);
1902  b_sum_num=-CMath::INFTY;
1903  for (t=0; t<p_observations->get_vector_length(dim); t++)
1904  {
1905  if (p_observations->get_feature(dim,t)==j)
1906  b_sum_num=CMath::logarithmic_sum(b_sum_num, estimate->forward(t,i,dim)+estimate->backward(t, i, dim));
1907  }
1908 
1909  set_b(i,j, CMath::logarithmic_sum(get_b(i,j), b_sum_num-dimmodprob));
1910  }
1911  }
1912 #ifdef USE_HMMPARALLEL
1913  SG_FREE(threads);
1914  SG_FREE(params);
1915 #endif
1916 
1917 
1918  //calculate estimates
 // numerator / denominator in log space: subtraction of logs
1919  for (k=0; (i=model->get_learn_p(k))!=-1; k++)
1920  set_p(i, get_p(i)-log(p_observations->get_num_vectors()+N*PSEUDO) );
1921 
1922  for (k=0; (i=model->get_learn_q(k))!=-1; k++)
1923  set_q(i, get_q(i)-log(p_observations->get_num_vectors()+N*PSEUDO) );
1924 
1925  for (k=0; (i=model->get_learn_a(k,0))!=-1; k++)
1926  {
1927  j=model->get_learn_a(k,1);
1928  set_a(i,j, get_a(i,j) - A[i]);
1929  }
1930 
1931  for (k=0; (i=model->get_learn_b(k,0))!=-1; k++)
1932  {
1933  j=model->get_learn_b(k,1);
1934  set_b(i,j, get_b(i,j) - B[i]);
1935  }
1936 
1937  //cache estimate model probability
1938  estimate->mod_prob=fullmodprob;
1939  estimate->mod_prob_updated=true ;
1940 
1941  //new model probability is unknown
1942  normalize();
1943  invalidate_model();
1944 }
1945 
1946 //estimates new model lambda out of lambda_estimate using viterbi algorithm
// Viterbi training (segmental k-means): decodes the best path for every
// observation vector, counts state-transition (A), emission (B), start (P)
// and end (Q) occurrences along those paths (pseudocount-initialized),
// then converts the counts to log-probabilities a,b,p,q.
// NOTE(review): the extracted listing is missing this function's signature
// line; from the body it takes the trained model as parameter `estimate`
// -- presumably void CHMM::estimate_model_viterbi(CHMM* estimate). Verify
// against HMM.h.
1948 {
1949  int32_t i,j,t;
1950  float64_t sum;
1951  float64_t* P=ARRAYN1(0);
1952  float64_t* Q=ARRAYN2(0);
1953 
1954  path_deriv_updated=false ;
1955 
1956  //initialize with pseudocounts
1957  for (i=0; i<N; i++)
1958  {
1959  for (j=0; j<N; j++)
1960  set_A(i,j, PSEUDO);
1961 
1962  for (j=0; j<M; j++)
1963  set_B(i,j, PSEUDO);
1964 
1965  P[i]=PSEUDO;
1966  Q[i]=PSEUDO;
1967  }
1968 
1969  float64_t allpatprob=0 ;
1970 
1971 #ifdef USE_HMMPARALLEL
1972  int32_t num_threads = parallel->get_num_threads();
1973  pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
1974  S_DIM_THREAD_PARAM *params=SG_MALLOC(S_DIM_THREAD_PARAM, num_threads);
1975 
1976  if (p_observations->get_num_vectors()<num_threads)
1977  num_threads=p_observations->get_num_vectors();
1978 #endif
1979 
1980  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
1981  {
1982 
1983 #ifdef USE_HMMPARALLEL
1984  if (dim%num_threads==0)
1985  {
1986  for (i=0; i<num_threads; i++)
1987  {
1988  if (dim+i<p_observations->get_num_vectors())
1989  {
1990  params[i].hmm=estimate ;
1991  params[i].dim=dim+i ;
1992  pthread_create(&threads[i], NULL, vit_dim_prefetch, (void*)&params[i]) ;
1993  }
1994  }
1995  for (i=0; i<num_threads; i++)
1996  {
1997  if (dim+i<p_observations->get_num_vectors())
1998  {
1999  pthread_join(threads[i], NULL);
2000  allpatprob += params[i].prob_sum;
2001  }
2002  }
2003  }
2004 #else
2005  //using viterbi to find best path
2006  allpatprob += estimate->best_path(dim);
2007 #endif // USE_HMMPARALLEL
2008 
2009  //counting occurences for A and B
2010  for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
2011  {
2012  set_A(estimate->PATH(dim)[t], estimate->PATH(dim)[t+1], get_A(estimate->PATH(dim)[t], estimate->PATH(dim)[t+1])+1);
2013  set_B(estimate->PATH(dim)[t], p_observations->get_feature(dim,t), get_B(estimate->PATH(dim)[t], p_observations->get_feature(dim,t))+1);
2014  }
2015 
 // NOTE(review): a statement was dropped by the extraction here (listing
 // line 2016); presumably it counts the emission of the final timestep
 // (set_B on PATH(dim)[len-1]) -- verify against the original source.
2017 
2018  P[estimate->PATH(dim)[0]]++;
2019  Q[estimate->PATH(dim)[p_observations->get_vector_length(dim)-1]]++;
2020  }
2021 
2022 #ifdef USE_HMMPARALLEL
2023  SG_FREE(threads);
2024  SG_FREE(params);
2025 #endif
2026 
2027  allpatprob/=p_observations->get_num_vectors() ;
2028  estimate->all_pat_prob=allpatprob ;
2029  estimate->all_path_prob_updated=true ;
2030 
2031  //converting A to probability measure a
2032  for (i=0; i<N; i++)
2033  {
2034  sum=0;
2035  for (j=0; j<N; j++)
2036  sum+=get_A(i,j);
2037 
2038  for (j=0; j<N; j++)
2039  set_a(i,j, log(get_A(i,j)/sum));
2040  }
2041 
2042  //converting B to probability measures b
2043  for (i=0; i<N; i++)
2044  {
2045  sum=0;
2046  for (j=0; j<M; j++)
2047  sum+=get_B(i,j);
2048 
2049  for (j=0; j<M; j++)
2050  set_b(i,j, log(get_B(i, j)/sum));
2051  }
2052 
2053  //converting P to probability measure p
2054  sum=0;
2055  for (i=0; i<N; i++)
2056  sum+=P[i];
2057 
2058  for (i=0; i<N; i++)
2059  set_p(i, log(P[i]/sum));
2060 
2061  //converting Q to probability measure q
2062  sum=0;
2063  for (i=0; i<N; i++)
2064  sum+=Q[i];
2065 
2066  for (i=0; i<N; i++)
2067  set_q(i, log(Q[i]/sum));
2068 
2069  //new model probability is unknown
2070  invalidate_model();
2071 }
2072 
2073 // estimate parameters listed in learn_x
// Viterbi training restricted to the parameters listed in the Model
// object: paths are decoded and occurrence counts gathered exactly as in
// the unrestricted variant, but only the learn_a/learn_b/learn_p/learn_q
// entries are converted to probabilities; all other parameters are copied
// unchanged from `estimate`.
// NOTE(review): the extracted listing is missing this function's signature
// line; from the body it takes the trained model as parameter `estimate`
// -- presumably void CHMM::estimate_model_viterbi_defined(CHMM* estimate).
// Verify against HMM.h.
2075 {
2076  int32_t i,j,k,t;
2077  float64_t sum;
2078  float64_t* P=ARRAYN1(0);
2079  float64_t* Q=ARRAYN2(0);
2080 
2081  path_deriv_updated=false ;
2082 
2083  //initialize with pseudocounts
2084  for (i=0; i<N; i++)
2085  {
2086  for (j=0; j<N; j++)
2087  set_A(i,j, PSEUDO);
2088 
2089  for (j=0; j<M; j++)
2090  set_B(i,j, PSEUDO);
2091 
2092  P[i]=PSEUDO;
2093  Q[i]=PSEUDO;
2094  }
2095 
2096 #ifdef USE_HMMPARALLEL
2097  int32_t num_threads = parallel->get_num_threads();
2098  pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
2099  S_DIM_THREAD_PARAM *params=SG_MALLOC(S_DIM_THREAD_PARAM, num_threads);
2100 #endif
2101 
2102  float64_t allpatprob=0.0 ;
2103  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
2104  {
2105 
2106 #ifdef USE_HMMPARALLEL
2107  if (dim%num_threads==0)
2108  {
2109  for (i=0; i<num_threads; i++)
2110  {
2111  if (dim+i<p_observations->get_num_vectors())
2112  {
2113  params[i].hmm=estimate ;
2114  params[i].dim=dim+i ;
2115  pthread_create(&threads[i], NULL, vit_dim_prefetch, (void*)&params[i]) ;
2116  }
2117  }
2118  for (i=0; i<num_threads; i++)
2119  {
2120  if (dim+i<p_observations->get_num_vectors())
2121  {
2122  pthread_join(threads[i], NULL);
2123  allpatprob += params[i].prob_sum;
2124  }
2125  }
2126  }
2127 #else // USE_HMMPARALLEL
2128  //using viterbi to find best path
2129  allpatprob += estimate->best_path(dim);
2130 #endif // USE_HMMPARALLEL
2131 
2132 
2133  //counting occurences for A and B
2134  for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
2135  {
2136  set_A(estimate->PATH(dim)[t], estimate->PATH(dim)[t+1], get_A(estimate->PATH(dim)[t], estimate->PATH(dim)[t+1])+1);
2137  set_B(estimate->PATH(dim)[t], p_observations->get_feature(dim,t), get_B(estimate->PATH(dim)[t], p_observations->get_feature(dim,t))+1);
2138  }
2139 
 // NOTE(review): a statement was dropped by the extraction here (listing
 // line 2140); presumably it counts the emission of the final timestep
 // (set_B on PATH(dim)[len-1]) -- verify against the original source.
2141 
2142  P[estimate->PATH(dim)[0]]++;
2143  Q[estimate->PATH(dim)[p_observations->get_vector_length(dim)-1]]++;
2144  }
2145 
2146 #ifdef USE_HMMPARALLEL
2147  SG_FREE(threads);
2148  SG_FREE(params);
2149 #endif
2150 
2151  //estimate->invalidate_model() ;
2152  //float64_t q=estimate->best_path(-1) ;
2153 
2154  allpatprob/=p_observations->get_num_vectors() ;
2155  estimate->all_pat_prob=allpatprob ;
2156  estimate->all_path_prob_updated=true ;
2157 
2158 
2159  //copy old model
2160  for (i=0; i<N; i++)
2161  {
2162  for (j=0; j<N; j++)
2163  set_a(i,j, estimate->get_a(i,j));
2164 
2165  for (j=0; j<M; j++)
2166  set_b(i,j, estimate->get_b(i,j));
2167  }
2168 
2169  //converting A to probability measure a
 // learn_a entries are grouped by source state: accumulate the row sum
 // while i stays on the same source state (tracked via j), then normalize
 // the group [k, i) once the source state changes
2170  i=0;
2171  sum=0;
2172  j=model->get_learn_a(i,0);
2173  k=i;
2174  while (model->get_learn_a(i,0)!=-1 || k<i)
2175  {
2176  if (j==model->get_learn_a(i,0))
2177  {
2178  sum+=get_A(model->get_learn_a(i,0), model->get_learn_a(i,1));
2179  i++;
2180  }
2181  else
2182  {
2183  while (k<i)
2184  {
2185  set_a(model->get_learn_a(k,0), model->get_learn_a(k,1), log (get_A(model->get_learn_a(k,0), model->get_learn_a(k,1)) / sum));
2186  k++;
2187  }
2188 
2189  sum=0;
2190  j=model->get_learn_a(i,0);
2191  k=i;
2192  }
2193  }
2194 
2195  //converting B to probability measures b
2196  i=0;
2197  sum=0;
2198  j=model->get_learn_b(i,0);
2199  k=i;
2200  while (model->get_learn_b(i,0)!=-1 || k<i)
2201  {
2202  if (j==model->get_learn_b(i,0))
2203  {
2204  sum+=get_B(model->get_learn_b(i,0),model->get_learn_b(i,1));
2205  i++;
2206  }
2207  else
2208  {
2209  while (k<i)
2210  {
2211  set_b(model->get_learn_b(k,0),model->get_learn_b(k,1), log (get_B(model->get_learn_b(k,0), model->get_learn_b(k,1)) / sum));
2212  k++;
2213  }
2214 
2215  sum=0;
2216  j=model->get_learn_b(i,0);
2217  k=i;
2218  }
2219  }
2220 
 // normalize the learned start-state counts over the learn_p set only
2221  i=0;
2222  sum=0;
2223  while (model->get_learn_p(i)!=-1)
2224  {
2225  sum+=P[model->get_learn_p(i)] ;
2226  i++ ;
2227  } ;
2228  i=0 ;
2229  while (model->get_learn_p(i)!=-1)
2230  {
2231  set_p(model->get_learn_p(i), log(P[model->get_learn_p(i)]/sum));
2232  i++ ;
2233  } ;
2234 
 // normalize the learned end-state counts over the learn_q set only
2235  i=0;
2236  sum=0;
2237  while (model->get_learn_q(i)!=-1)
2238  {
2239  sum+=Q[model->get_learn_q(i)] ;
2240  i++ ;
2241  } ;
2242  i=0 ;
2243  while (model->get_learn_q(i)!=-1)
2244  {
2245  set_q(model->get_learn_q(i), log(Q[model->get_learn_q(i)]/sum));
2246  i++ ;
2247  } ;
2248 
2249 
2250  //new model probability is unknown
2251  invalidate_model();
2252 }
2253 //------------------------------------------------------------------------------------//
2254 
2255 //to give an idea what the model looks like
2256 void CHMM::output_model(bool verbose)
2257 {
// Dumps the model parameters to the log. Always prints a one-line summary;
// with verbose=true additionally prints the transition matrix a, the start
// distribution p, the terminal distribution q and the emission matrix b.
// Each row/vector is followed by a log-domain checksum that is ~0 (i.e.
// the probabilities sum to 1) for a properly normalized model.
2258  int32_t i,j;
2259  float64_t checksum;
2260 
2261  //generic info
// NOTE(review): the SG_INFO argument list (original lines 2263-2264) was
// lost in this listing -- the call below is missing its arguments here.
2262  SG_INFO("log(Pr[O|model])=%e, #states: %i, #observationssymbols: %i, #observations: %ix%i\n",
2265 
2266  if (verbose)
2267  {
2268  // tranisition matrix a
2269  SG_INFO("\ntransition matrix\n")
2270  for (i=0; i<N; i++)
2271  {
// seed the row checksum with q(i): leaving state i via a-row plus
// terminating in state i must together cover all probability mass
2272  checksum= get_q(i);
2273  for (j=0; j<N; j++)
2274  {
2275  checksum= CMath::logarithmic_sum(checksum, get_a(i,j));
2276 
2277  SG_INFO("a(%02i,%02i)=%1.4f ",i,j, (float32_t) exp(get_a(i,j)))
2278 
2279  if (j % 4 == 3)
2280  SG_PRINT("\n")
2281  }
2282  if (fabs(checksum)>1e-5)
2283  SG_DEBUG(" checksum % E ******* \n",checksum)
2284  else
2285  SG_DEBUG(" checksum % E\n",checksum)
2286  }
2287 
2288  // distribution of start states p
2289  SG_INFO("\ndistribution of start states\n")
2290  checksum=-CMath::INFTY;
2291  for (i=0; i<N; i++)
2292  {
2293  checksum= CMath::logarithmic_sum(checksum, get_p(i));
2294  SG_INFO("p(%02i)=%1.4f ",i, (float32_t) exp(get_p(i)))
2295  if (i % 4 == 3)
2296  SG_PRINT("\n")
2297  }
2298  if (fabs(checksum)>1e-5)
2299  SG_DEBUG(" checksum % E ******* \n",checksum)
2300  else
2301  SG_DEBUG(" checksum=% E\n", checksum)
2302 
2303  // distribution of terminal states p
2304  SG_INFO("\ndistribution of terminal states\n")
2305  checksum=-CMath::INFTY;
2306  for (i=0; i<N; i++)
2307  {
2308  checksum= CMath::logarithmic_sum(checksum, get_q(i));
2309  SG_INFO("q(%02i)=%1.4f ",i, (float32_t) exp(get_q(i)))
2310  if (i % 4 == 3)
// NOTE(review): analogous branches above use SG_PRINT("\n"); SG_INFO here
// looks like an inconsistency -- confirm before changing.
2311  SG_INFO("\n")
2312  }
2313  if (fabs(checksum)>1e-5)
2314  SG_DEBUG(" checksum % E ******* \n",checksum)
2315  else
2316  SG_DEBUG(" checksum=% E\n", checksum)
2317 
2318  // distribution of observations given the state b
2319  SG_INFO("\ndistribution of observations given the state\n")
2320  for (i=0; i<N; i++)
2321  {
2322  checksum=-CMath::INFTY;
2323  for (j=0; j<M; j++)
2324  {
2325  checksum=CMath::logarithmic_sum(checksum, get_b(i,j));
2326  SG_INFO("b(%02i,%02i)=%1.4f ",i,j, (float32_t) exp(get_b(i,j)))
2327  if (j % 4 == 3)
2328  SG_PRINT("\n")
2329  }
2330  if (fabs(checksum)>1e-5)
2331  SG_DEBUG(" checksum % E ******* \n",checksum)
2332  else
2333  SG_DEBUG(" checksum % E\n",checksum)
2334  }
2335  }
2336  SG_PRINT("\n")
2337 }
2338 
2339 //to give an idea what the model looks like
2340 void CHMM::output_model_defined(bool verbose)
2341 {
// Like output_model(), but prints only the a and b entries that are marked
// as learnable in the attached Model definition; no-op when no model
// definition is present.
2342  int32_t i,j;
2343  if (!model)
2344  return ;
2345 
2346  //generic info
// NOTE(review): the SG_INFO argument list (original lines 2348-2349) was
// lost in this listing -- the call below is missing its arguments here.
2347  SG_INFO("log(Pr[O|model])=%e, #states: %i, #observationssymbols: %i, #observations: %ix%i\n",
2350 
2351  if (verbose)
2352  {
2353  // tranisition matrix a
2354  SG_INFO("\ntransition matrix\n")
2355 
2356  //initialize a values that have to be learned
2357  i=0;
2358  j=model->get_learn_a(i,0);
// learn_a rows are grouped by source state; start a fresh output line
// whenever the source state changes
2359  while (model->get_learn_a(i,0)!=-1)
2360  {
2361  if (j!=model->get_learn_a(i,0))
2362  {
2363  j=model->get_learn_a(i,0);
2364  SG_PRINT("\n")
2365  }
2366 
2367  SG_INFO("a(%02i,%02i)=%1.4f ",model->get_learn_a(i,0), model->get_learn_a(i,1), (float32_t) exp(get_a(model->get_learn_a(i,0), model->get_learn_a(i,1))))
2368  i++;
2369  }
2370 
2371  // distribution of observations given the state b
2372  SG_INFO("\n\ndistribution of observations given the state\n")
2373  i=0;
2374  j=model->get_learn_b(i,0);
2375  while (model->get_learn_b(i,0)!=-1)
2376  {
2377  if (j!=model->get_learn_b(i,0))
2378  {
2379  j=model->get_learn_b(i,0);
2380  SG_PRINT("\n")
2381  }
2382 
2383  SG_INFO("b(%02i,%02i)=%1.4f ",model->get_learn_b(i,0),model->get_learn_b(i,1), (float32_t) exp(get_b(model->get_learn_b(i,0),model->get_learn_b(i,1))))
2384  i++;
2385  }
2386 
2387  SG_PRINT("\n")
2388  }
2389  SG_PRINT("\n")
2390 }
2391 
2392 //------------------------------------------------------------------------------------//
2393 
2394 //convert model to log probabilities
// NOTE(review): the function header (original line 2395, presumably
// `void CHMM::convert_to_log()`) is not visible in this listing.
// Converts p, q, a and b in place from plain probabilities to log
// probabilities, mapping exact zeros to -INFTY (the log-domain zero),
// then flags the model as log-domain and invalidates cached quantities.
2396 {
2397  int32_t i,j;
2398 
2399  for (i=0; i<N; i++)
2400  {
2401  if (get_p(i)!=0)
2402  set_p(i, log(get_p(i)));
2403  else
// zero probability has no finite logarithm -- substitute -INFTY
2404  set_p(i, -CMath::INFTY);;
2405  }
2406 
2407  for (i=0; i<N; i++)
2408  {
2409  if (get_q(i)!=0)
2410  set_q(i, log(get_q(i)));
2411  else
2412  set_q(i, -CMath::INFTY);;
2413  }
2414 
2415  for (i=0; i<N; i++)
2416  {
2417  for (j=0; j<N; j++)
2418  {
2419  if (get_a(i,j)!=0)
2420  set_a(i,j, log(get_a(i,j)));
2421  else
2422  set_a(i,j, -CMath::INFTY);
2423  }
2424  }
2425 
2426  for (i=0; i<N; i++)
2427  {
2428  for (j=0; j<M; j++)
2429  {
2430  if (get_b(i,j)!=0)
2431  set_b(i,j, log(get_b(i,j)));
2432  else
2433  set_b(i,j, -CMath::INFTY);
2434  }
2435  }
// from here on the parameters are interpreted as log probabilities
2436  loglikelihood=true;
2437 
2438  invalidate_model();
2439 }
2440 
2441 //init model with random values
// NOTE(review): the function header (original line 2442, presumably
// `void CHMM::init_model_random()`) is not visible in this listing.
// Fills a, p, q and b with uniform random values in [MIN_RAND, 1.0] and
// normalizes each distribution (rows of a and b, and the vectors p and q)
// to sum to 1; cached quantities are invalidated afterwards.
2443 {
// lower bound keeps every probability strictly positive
2444  const float64_t MIN_RAND=23e-3;
2445 
2446  float64_t sum;
2447  int32_t i,j;
2448 
2449  //initialize a with random values
2450  for (i=0; i<N; i++)
2451  {
2452  sum=0;
2453  for (j=0; j<N; j++)
2454  {
2455  set_a(i,j, CMath::random(MIN_RAND, 1.0));
2456 
2457  sum+=get_a(i,j);
2458  }
2459 
// normalize row i so it sums to 1
2460  for (j=0; j<N; j++)
2461  set_a(i,j, get_a(i,j)/sum);
2462  }
2463 
2464  //initialize pi with random values
2465  sum=0;
2466  for (i=0; i<N; i++)
2467  {
2468  set_p(i, CMath::random(MIN_RAND, 1.0));
2469 
2470  sum+=get_p(i);
2471  }
2472 
2473  for (i=0; i<N; i++)
2474  set_p(i, get_p(i)/sum);
2475 
2476  //initialize q with random values
2477  sum=0;
2478  for (i=0; i<N; i++)
2479  {
2480  set_q(i, CMath::random(MIN_RAND, 1.0));
2481 
2482  sum+=get_q(i);
2483  }
2484 
2485  for (i=0; i<N; i++)
2486  set_q(i, get_q(i)/sum);
2487 
2488  //initialize b with random values
2489  for (i=0; i<N; i++)
2490  {
2491  sum=0;
2492  for (j=0; j<M; j++)
2493  {
2494  set_b(i,j, CMath::random(MIN_RAND, 1.0));
2495 
2496  sum+=get_b(i,j);
2497  }
2498 
2499  for (j=0; j<M; j++)
2500  set_b(i,j, get_b(i,j)/sum);
2501  }
2502 
2503  //initialize pat/mod_prob as not calculated
2504  invalidate_model();
2505 }
2506 
2507 //init model according to const_x
// NOTE(review): the function header (original line 2508, presumably
// `void CHMM::init_model_defined()`) is not visible in this listing.
// Initializes only the parameters listed in the attached Model definition:
// learnable entries receive normalized random values, const entries their
// fixed values; everything else is zeroed.
2509 {
2510  int32_t i,j,k,r;
2511  float64_t sum;
2512  const float64_t MIN_RAND=23e-3;
2513 
2514  //initialize a with zeros
2515  for (i=0; i<N; i++)
2516  for (j=0; j<N; j++)
2517  set_a(i,j, 0);
2518 
2519  //initialize p with zeros
2520  for (i=0; i<N; i++)
2521  set_p(i, 0);
2522 
2523  //initialize q with zeros
2524  for (i=0; i<N; i++)
2525  set_q(i, 0);
2526 
2527  //initialize b with zeros
2528  for (i=0; i<N; i++)
2529  for (j=0; j<M; j++)
2530  set_b(i,j, 0);
2531 
2532 
2533  //initialize a values that have to be learned
2534  float64_t *R=SG_MALLOC(float64_t, N);
2535  for (r=0; r<N; r++) R[r]=CMath::random(MIN_RAND,1.0);
2536  i=0; sum=0; k=i;
2537  j=model->get_learn_a(i,0);
// learn_a rows are grouped by source state j; accumulate the random draws
// for one group, then -- once the source state changes -- normalize that
// group's entries to sum to 1 before starting the next group
2538  while (model->get_learn_a(i,0)!=-1 || k<i)
2539  {
2540  if (j==model->get_learn_a(i,0))
2541  {
2542  sum+=R[model->get_learn_a(i,1)] ;
2543  i++;
2544  }
2545  else
2546  {
2547  while (k<i)
2548  {
2549  set_a(model->get_learn_a(k,0), model->get_learn_a(k,1),
2550  R[model->get_learn_a(k,1)]/sum);
2551  k++;
2552  }
2553  j=model->get_learn_a(i,0);
2554  k=i;
2555  sum=0;
2556  for (r=0; r<N; r++) R[r]=CMath::random(MIN_RAND,1.0);
2557  }
2558  }
2559  SG_FREE(R); R=NULL ;
2560 
2561  //initialize b values that have to be learned
2562  R=SG_MALLOC(float64_t, M);
2563  for (r=0; r<M; r++) R[r]=CMath::random(MIN_RAND,1.0);
2564  i=0; sum=0; k=0 ;
2565  j=model->get_learn_b(i,0);
2566  while (model->get_learn_b(i,0)!=-1 || k<i)
2567  {
2568  if (j==model->get_learn_b(i,0))
2569  {
2570  sum+=R[model->get_learn_b(i,1)] ;
2571  i++;
2572  }
2573  else
2574  {
2575  while (k<i)
2576  {
2577  set_b(model->get_learn_b(k,0),model->get_learn_b(k,1),
2578  R[model->get_learn_b(k,1)]/sum);
2579  k++;
2580  }
2581 
2582  j=model->get_learn_b(i,0);
2583  k=i;
2584  sum=0;
2585  for (r=0; r<M; r++) R[r]=CMath::random(MIN_RAND,1.0);
2586  }
2587  }
2588  SG_FREE(R); R=NULL ;
2589 
2590  //set consts into a
2591  i=0;
2592  while (model->get_const_a(i,0) != -1)
2593  {
// NOTE(review): the loop body (original line 2594, presumably the
// set_a(...) call applying const_a_val) was lost in this listing.
2595  i++;
2596  }
2597 
2598  //set consts into b
2599  i=0;
2600  while (model->get_const_b(i,0) != -1)
2601  {
// NOTE(review): the loop body (original line 2602, presumably the
// set_b(...) call applying const_b_val) was lost in this listing.
2603  i++;
2604  }
2605 
2606  //set consts into p
2607  i=0;
2608  while (model->get_const_p(i) != -1)
2609  {
// NOTE(review): the loop body (original line 2610, presumably the
// set_p(...) call applying const_p_val) was lost in this listing.
2611  i++;
2612  }
2613 
2614  //initialize q with zeros
2615  for (i=0; i<N; i++)
2616  set_q(i, 0.0);
2617 
2618  //set consts into q
2619  i=0;
2620  while (model->get_const_q(i) != -1)
2621  {
// NOTE(review): the loop body (original line 2622, presumably the
// set_q(...) call applying const_q_val) was lost in this listing.
2623  i++;
2624  }
2625 
2626  // init p
2627  i=0;
2628  sum=0;
2629  while (model->get_learn_p(i)!=-1)
2630  {
2631  set_p(model->get_learn_p(i),CMath::random(MIN_RAND,1.0)) ;
2632  sum+=get_p(model->get_learn_p(i)) ;
2633  i++ ;
2634  } ;
2635  i=0 ;
2636  while (model->get_learn_p(i)!=-1)
2637  {
2638  set_p(model->get_learn_p(i), get_p(model->get_learn_p(i))/sum);
2639  i++ ;
2640  } ;
2641 
2642  // initialize q
2643  i=0;
2644  sum=0;
2645  while (model->get_learn_q(i)!=-1)
2646  {
2647  set_q(model->get_learn_q(i),CMath::random(MIN_RAND,1.0)) ;
2648  sum+=get_q(model->get_learn_q(i)) ;
2649  i++ ;
2650  } ;
2651  i=0 ;
2652  while (model->get_learn_q(i)!=-1)
2653  {
2654  set_q(model->get_learn_q(i), get_q(model->get_learn_q(i))/sum);
2655  i++ ;
2656  } ;
2657 
2658  //initialize pat/mod_prob as not calculated
2659  invalidate_model();
2660 }
2661 
// NOTE(review): the function header (original line 2662, presumably
// `void CHMM::clear_model()`) is not visible in this listing.
// Resets all parameters p, q, a and b to log(PSEUDO), i.e. the
// log-domain pseudo-count "zero".
2663 {
2664  int32_t i,j;
2665  for (i=0; i<N; i++)
2666  {
2667  set_p(i, log(PSEUDO));
2668  set_q(i, log(PSEUDO));
2669 
2670  for (j=0; j<N; j++)
2671  set_a(i,j, log(PSEUDO));
2672 
2673  for (j=0; j<M; j++)
2674  set_b(i,j, log(PSEUDO));
2675  }
2676 }
2677 
// NOTE(review): the function header (original line 2678, presumably
// `void CHMM::clear_model_defined()`) is not visible in this listing.
// Resets only the parameters marked learnable in the Model definition
// to log(PSEUDO); const and untouched entries are left as they are.
2679 {
2680  int32_t i,j,k;
2681 
2682  for (i=0; (j=model->get_learn_p(i))!=-1; i++)
2683  set_p(j, log(PSEUDO));
2684 
2685  for (i=0; (j=model->get_learn_q(i))!=-1; i++)
2686  set_q(j, log(PSEUDO));
2687 
2688  for (i=0; (j=model->get_learn_a(i,0))!=-1; i++)
2689  {
2690  k=model->get_learn_a(i,1); // catch (j,k) as indizes to be learned
2691  set_a(j,k, log(PSEUDO));
2692  }
2693 
2694  for (i=0; (j=model->get_learn_b(i,0))!=-1; i++)
2695  {
2696  k=model->get_learn_b(i,1); // catch (j,k) as indizes to be learned
2697  set_b(j,k, log(PSEUDO));
2698  }
2699 }
2700 
// NOTE(review): the function header (original line 2701, presumably
// `void CHMM::copy_model(CHMM* l)`) is not visible in this listing.
// Copies all parameters p, q, a and b from model l into this model;
// assumes l has the same N and M -- TODO confirm at call sites.
2702 {
2703  int32_t i,j;
2704  for (i=0; i<N; i++)
2705  {
2706  set_p(i, l->get_p(i));
2707  set_q(i, l->get_q(i));
2708 
2709  for (j=0; j<N; j++)
2710  set_a(i,j, l->get_a(i,j));
2711 
2712  for (j=0; j<M; j++)
2713  set_b(i,j, l->get_b(i,j));
2714  }
2715 }
2716 
// NOTE(review): the function header (original line 2717, presumably
// `void CHMM::invalidate_model()`) is not visible in this listing.
// Marks every cached quantity (model prob, path probs, alpha/beta caches)
// as stale and, when memory is initialized, rebuilds the per-state lists
// of transitions whose log probability is above ALMOST_NEG_INFTY.
2718 {
2719  //initialize pat/mod_prob/alpha/beta cache as not calculated
2720  this->mod_prob=0.0;
2721  this->mod_prob_updated=false;
2722 
2723  if (mem_initialized)
2724  {
// free the previous adjacency lists before rebuilding them from a
2725  if (trans_list_forward_cnt)
2726  SG_FREE(trans_list_forward_cnt);
2727  trans_list_forward_cnt=NULL ;
2728  if (trans_list_backward_cnt)
2729  SG_FREE(trans_list_backward_cnt);
2730  trans_list_backward_cnt=NULL ;
2731  if (trans_list_forward)
2732  {
2733  for (int32_t i=0; i<trans_list_len; i++)
2734  if (trans_list_forward[i])
2735  SG_FREE(trans_list_forward[i]);
2736  SG_FREE(trans_list_forward);
2737  trans_list_forward=NULL ;
2738  }
2739  if (trans_list_backward)
2740  {
2741  for (int32_t i=0; i<trans_list_len; i++)
2742  if (trans_list_backward[i])
2743  SG_FREE(trans_list_backward[i]);
2744  SG_FREE(trans_list_backward);
2745  trans_list_backward = NULL ;
2746  } ;
2747 
2748  trans_list_len = N ;
2749  trans_list_forward = SG_MALLOC(T_STATES*, N);
2750  trans_list_forward_cnt = SG_MALLOC(T_STATES, N);
2751 
// forward list: for each target state j, collect the source states i
// with a(i,j) above the log-domain "effectively zero" threshold
2752  for (int32_t j=0; j<N; j++)
2753  {
2754  trans_list_forward_cnt[j]= 0 ;
2755  trans_list_forward[j]= SG_MALLOC(T_STATES, N);
2756  for (int32_t i=0; i<N; i++)
2757  if (get_a(i,j)>CMath::ALMOST_NEG_INFTY)
2758  {
2759  trans_list_forward[j][trans_list_forward_cnt[j]]=i ;
2760  trans_list_forward_cnt[j]++ ;
2761  }
2762  } ;
2763 
2764  trans_list_backward = SG_MALLOC(T_STATES*, N);
2765  trans_list_backward_cnt = SG_MALLOC(T_STATES, N);
2766 
// backward list: for each source state i, collect its reachable targets j
2767  for (int32_t i=0; i<N; i++)
2768  {
2769  trans_list_backward_cnt[i]= 0 ;
2770  trans_list_backward[i]= SG_MALLOC(T_STATES, N);
2771  for (int32_t j=0; j<N; j++)
2772  if (get_a(i,j)>CMath::ALMOST_NEG_INFTY)
2773  {
2774  trans_list_backward[i][trans_list_backward_cnt[i]]=j ;
2775  trans_list_backward_cnt[i]++ ;
2776  }
2777  } ;
2778  } ;
2779  this->all_pat_prob=0.0;
2780  this->pat_prob=0.0;
2781  this->path_deriv_updated=false ;
2782  this->path_deriv_dimension=-1 ;
2783  this->all_path_prob_updated=false;
2784 
2785 #ifdef USE_HMMPARALLEL_STRUCTURES
// per-thread caches exist when parallel structures are compiled in
2786  {
2787  for (int32_t i=0; i<parallel->get_num_threads(); i++)
2788  {
2789  this->alpha_cache[i].updated=false;
2790  this->beta_cache[i].updated=false;
2791  path_prob_updated[i]=false ;
2792  path_prob_dimension[i]=-1 ;
2793  } ;
2794  }
2795 #else // USE_HMMPARALLEL_STRUCTURES
2796  this->alpha_cache.updated=false;
2797  this->beta_cache.updated=false;
2798  this->path_prob_dimension=-1;
2799  this->path_prob_updated=false;
2800 
2801 #endif // USE_HMMPARALLEL_STRUCTURES
2802 }
2803 
2804 void CHMM::open_bracket(FILE* file)
2805 {
2806  int32_t value;
2807  while (((value=fgetc(file)) != EOF) && (value!='[')) //skip possible spaces and end if '[' occurs
2808  {
2809  if (value=='\n')
2810  line++;
2811  }
2812 
2813  if (value==EOF)
2814  error(line, "expected \"[\" in input file");
2815 
2816  while (((value=fgetc(file)) != EOF) && (isspace(value))) //skip possible spaces
2817  {
2818  if (value=='\n')
2819  line++;
2820  }
2821 
2822  ungetc(value, file);
2823 }
2824 
2825 void CHMM::close_bracket(FILE* file)
2826 {
2827  int32_t value;
2828  while (((value=fgetc(file)) != EOF) && (value!=']')) //skip possible spaces and end if ']' occurs
2829  {
2830  if (value=='\n')
2831  line++;
2832  }
2833 
2834  if (value==EOF)
2835  error(line, "expected \"]\" in input file");
2836 }
2837 
2838 bool CHMM::comma_or_space(FILE* file)
2839 {
2840  int32_t value;
2841  while (((value=fgetc(file)) != EOF) && (value!=',') && (value!=';') && (value!=']')) //skip possible spaces and end if ',' or ';' occurs
2842  {
2843  if (value=='\n')
2844  line++;
2845  }
2846  if (value==']')
2847  {
2848  ungetc(value, file);
2849  SG_ERROR("found ']' instead of ';' or ','\n")
2850  return false ;
2851  } ;
2852 
2853  if (value==EOF)
2854  error(line, "expected \";\" or \",\" in input file");
2855 
2856  while (((value=fgetc(file)) != EOF) && (isspace(value))) //skip possible spaces
2857  {
2858  if (value=='\n')
2859  line++;
2860  }
2861  ungetc(value, file);
2862  return true ;
2863 }
2864 
2865 bool CHMM::get_numbuffer(FILE* file, char* buffer, int32_t length)
2866 {
2867  signed char value;
2868 
2869  while (((value=fgetc(file)) != EOF) &&
2870  !isdigit(value) && (value!='A')
2871  && (value!='C') && (value!='G') && (value!='T')
2872  && (value!='N') && (value!='n')
2873  && (value!='.') && (value!='-') && (value!='e') && (value!=']')) //skip possible spaces+crap
2874  {
2875  if (value=='\n')
2876  line++;
2877  }
2878  if (value==']')
2879  {
2880  ungetc(value,file) ;
2881  return false ;
2882  } ;
2883  if (value!=EOF)
2884  {
2885  int32_t i=0;
2886  switch (value)
2887  {
2888  case 'A':
2889  value='0' +CAlphabet::B_A;
2890  break;
2891  case 'C':
2892  value='0' +CAlphabet::B_C;
2893  break;
2894  case 'G':
2895  value='0' +CAlphabet::B_G;
2896  break;
2897  case 'T':
2898  value='0' +CAlphabet::B_T;
2899  break;
2900  };
2901 
2902  buffer[i++]=value;
2903 
2904  while (((value=fgetc(file)) != EOF) &&
2905  (isdigit(value) || (value=='.') || (value=='-') || (value=='e')
2906  || (value=='A') || (value=='C') || (value=='G')|| (value=='T')
2907  || (value=='N') || (value=='n')) && (i<length))
2908  {
2909  switch (value)
2910  {
2911  case 'A':
2912  value='0' +CAlphabet::B_A;
2913  break;
2914  case 'C':
2915  value='0' +CAlphabet::B_C;
2916  break;
2917  case 'G':
2918  value='0' +CAlphabet::B_G;
2919  break;
2920  case 'T':
2921  value='0' +CAlphabet::B_T;
2922  break;
2923  case '1': case '2': case'3': case '4': case'5':
2924  case '6': case '7': case'8': case '9': case '0': break ;
2925  case '.': case 'e': case '-': break ;
2926  default:
2927  SG_ERROR("found crap: %i %c (pos:%li)\n",i,value,ftell(file))
2928  };
2929  buffer[i++]=value;
2930  }
2931  ungetc(value, file);
2932  buffer[i]='\0';
2933 
2934  return (i<=length) && (i>0);
2935  }
2936  return false;
2937 }
2938 
2939 /*
2940  -format specs: model_file (model.hmm)
2941  % HMM - specification
2942  % N - number of states
2943  % M - number of observation_tokens
2944  % a is state_transition_matrix
2945  % size(a)= [N,N]
2946  %
2947  % b is observation_per_state_matrix
2948  % size(b)= [N,M]
2949  %
2950  % p is initial distribution
2951  % size(p)= [1, N]
2952 
2953  N=<int32_t>;
2954  M=<int32_t>;
2955 
2956  p=[<DOUBLE>,<DOUBLE>...<DOUBLE>];
2957  q=[<DOUBLE>,<DOUBLE>...<DOUBLE>];
2958 
2959  a=[ [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2960  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2961  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2962  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2963  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2964  ];
2965 
2966  b=[ [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2967  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2968  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2969  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2970  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
2971  ];
2972  */
2973 
2974 bool CHMM::load_model(FILE* file)
2975 {
// Parses a model file (format documented in the comment block above) with
// a small state machine: N and M must come first, then the arrays p, q, a
// and b. Returns true iff all expected parameters were read.
2976  int32_t received_params=0; //a,b,p,N,M,O
2977 
2978  bool result=false;
2979  E_STATE state=INITIAL;
2980  char buffer[1024];
2981 
2982  line=1;
2983  int32_t i,j;
2984 
2985  if (file)
2986  {
2987  while (state!=END)
2988  {
2989  int32_t value=fgetc(file);
2990 
2991  if (value=='\n')
2992  line++;
2993  if (value==EOF)
2994  state=END;
2995 
2996  switch (state)
2997  {
2998  case INITIAL: // in the initial state only N,M initialisations and comments are allowed
2999  if (value=='N')
3000  {
3001  if (received_params & GOTN)
// NOTE(review): message says "p" but this branch is about N
3002  error(line, "in model file: \"p double defined\"");
3003  else
3004  state=GET_N;
3005  }
3006  else if (value=='M')
3007  {
3008  if (received_params & GOTM)
// NOTE(review): message says "p" but this branch is about M
3009  error(line, "in model file: \"p double defined\"");
3010  else
3011  state=GET_M;
3012  }
3013  else if (value=='%')
3014  {
3015  state=COMMENT;
3016  }
3017  break;
3018  case ARRAYs: // when n,m, order are known p,a,b arrays are allowed to be read
3019  if (value=='p')
3020  {
3021  if (received_params & GOTp)
3022  error(line, "in model file: \"p double defined\"");
3023  else
3024  state=GET_p;
3025  }
// NOTE(review): plain `if` here (not `else if`) breaks the chain started
// above -- harmless for distinct characters, but confirm it is intended.
3026  if (value=='q')
3027  {
3028  if (received_params & GOTq)
3029  error(line, "in model file: \"q double defined\"");
3030  else
3031  state=GET_q;
3032  }
3033  else if (value=='a')
3034  {
3035  if (received_params & GOTa)
3036  error(line, "in model file: \"a double defined\"");
3037  else
3038  state=GET_a;
3039  }
3040  else if (value=='b')
3041  {
3042  if (received_params & GOTb)
3043  error(line, "in model file: \"b double defined\"");
3044  else
3045  state=GET_b;
3046  }
3047  else if (value=='%')
3048  {
3049  state=COMMENT;
3050  }
3051  break;
3052  case GET_N:
3053  if (value=='=')
3054  {
3055  if (get_numbuffer(file, buffer, 4)) //get num
3056  {
3057  this->N= atoi(buffer);
3058  received_params|=GOTN;
// NOTE(review): nothing in this function appears to set GOTO, so this
// equality (and the identical one in GET_M) can seemingly never hold;
// confirm against the full source whether an 'O' branch is missing here.
3059  state= (received_params == (GOTN | GOTM | GOTO)) ? ARRAYs : INITIAL;
3060  }
3061  else
3062  state=END; //end if error
3063  }
3064  break;
3065  case GET_M:
3066  if (value=='=')
3067  {
3068  if (get_numbuffer(file, buffer, 4)) //get num
3069  {
3070  this->M= atoi(buffer);
3071  received_params|=GOTM;
3072  state= (received_params == (GOTN | GOTM | GOTO)) ? ARRAYs : INITIAL;
3073  }
3074  else
3075  state=END; //end if error
3076  }
3077  break;
3078  case GET_a:
3079  if (value=='=')
3080  {
3081  float64_t f;
3082 
3083  transition_matrix_a=SG_MALLOC(float64_t, N*N);
3084  open_bracket(file);
3085  for (i=0; i<this->N; i++)
3086  {
3087  open_bracket(file);
3088 
3089  for (j=0; j<this->N ; j++)
3090  {
3091 
3092  if (fscanf( file, "%le", &f ) != 1)
3093  error(line, "float64_t expected");
3094  else
3095  set_a(i,j, f);
3096 
3097  if (j<this->N-1)
3098  comma_or_space(file);
3099  else
3100  close_bracket(file);
3101  }
3102 
3103  if (i<this->N-1)
3104  comma_or_space(file);
3105  else
3106  close_bracket(file);
3107  }
3108  received_params|=GOTa;
3109  }
// NOTE(review): received_params also carries GOTN|GOTM (and the others),
// so this equality never holds and END is only reached at EOF; the GET_b
// case below uses the correct masked comparison -- confirm and align.
3110  state= (received_params == (GOTa | GOTb | GOTp | GOTq)) ? END : ARRAYs;
3111  break;
3112  case GET_b:
3113  if (value=='=')
3114  {
3115  float64_t f;
3116 
3117  observation_matrix_b=SG_MALLOC(float64_t, N*M);
3118  open_bracket(file);
3119  for (i=0; i<this->N; i++)
3120  {
3121  open_bracket(file);
3122 
3123  for (j=0; j<this->M ; j++)
3124  {
3125 
3126  if (fscanf( file, "%le", &f ) != 1)
3127  error(line, "float64_t expected");
3128  else
3129  set_b(i,j, f);
3130 
3131  if (j<this->M-1)
3132  comma_or_space(file);
3133  else
3134  close_bracket(file);
3135  }
3136 
3137  if (i<this->N-1)
3138  comma_or_space(file);
3139  else
3140  close_bracket(file);
3141  }
3142  received_params|=GOTb;
3143  }
3144  state= ((received_params & (GOTa | GOTb | GOTp | GOTq)) == (GOTa | GOTb | GOTp | GOTq)) ? END : ARRAYs;
3145  break;
3146  case GET_p:
3147  if (value=='=')
3148  {
3149  float64_t f;
3150 
// NOTE(review): the allocation of the start-distribution array (original
// line 3151, presumably `initial_state_distribution_p=SG_MALLOC(float64_t, N);`)
// was lost in this listing.
3152  open_bracket(file);
3153  for (i=0; i<this->N ; i++)
3154  {
3155  if (fscanf( file, "%le", &f ) != 1)
3156  error(line, "float64_t expected");
3157  else
3158  set_p(i, f);
3159 
3160  if (i<this->N-1)
3161  comma_or_space(file);
3162  else
3163  close_bracket(file);
3164  }
3165  received_params|=GOTp;
3166  }
3167  state= (received_params == (GOTa | GOTb | GOTp | GOTq)) ? END : ARRAYs;
3168  break;
3169  case GET_q:
3170  if (value=='=')
3171  {
3172  float64_t f;
3173 
3174  end_state_distribution_q=SG_MALLOC(float64_t, N);
3175  open_bracket(file);
3176  for (i=0; i<this->N ; i++)
3177  {
3178  if (fscanf( file, "%le", &f ) != 1)
3179  error(line, "float64_t expected");
3180  else
3181  set_q(i, f);
3182 
3183  if (i<this->N-1)
3184  comma_or_space(file);
3185  else
3186  close_bracket(file);
3187  }
3188  received_params|=GOTq;
3189  }
3190  state= (received_params == (GOTa | GOTb | GOTp | GOTq)) ? END : ARRAYs;
3191  break;
3192  case COMMENT:
3193  if (value==EOF)
3194  state=END;
3195  else if (value=='\n')
3196  {
3197  line++;
3198  state=INITIAL;
3199  }
3200  break;
3201 
3202  default:
3203  break;
3204  }
3205  }
// NOTE(review): GOTO is required here but never appears to be set in this
// function as shown -- confirm whether result can ever become true.
3206  result= (received_params== (GOTa | GOTb | GOTp | GOTq | GOTN | GOTM | GOTO));
3207  }
3208 
3209  SG_WARNING("not normalizing anymore, call normalize_hmm to make sure the hmm is valid!!\n")
// NOTE(review): original line 3210 was lost in this listing.
3211  return result;
3212 }
3213 
3214 /*
3215  -format specs: train_file (train.trn)
3216  % HMM-TRAIN - specification
3217  % learn_a - elements in state_transition_matrix to be learned
3218 % learn_b - elements in observation_per_state_matrix to be learned
3219  % note: each line stands for
3220  % <state>, <observation(0)>, observation(1)...observation(NOW)>
3221  % learn_p - elements in initial distribution to be learned
3222  % learn_q - elements in the end-state distribution to be learned
3223  %
3224  % const_x - specifies initial values of elements
3225  % rest is assumed to be 0.0
3226  %
3227  % NOTE: IMPLICIT DEFINES:
3228  % #define A 0
3229  % #define C 1
3230  % #define G 2
3231  % #define T 3
3232  %
3233 
3234  learn_a=[ [<int32_t>,<int32_t>];
3235  [<int32_t>,<int32_t>];
3236  [<int32_t>,<int32_t>];
3237  ........
3238  [<int32_t>,<int32_t>];
3239  [-1,-1];
3240  ];
3241 
3242  learn_b=[ [<int32_t>,<int32_t>];
3243  [<int32_t>,<int32_t>];
3244  [<int32_t>,<int32_t>];
3245  ........
3246  [<int32_t>,<int32_t>];
3247  [-1,-1];
3248  ];
3249 
3250  learn_p= [ <int32_t>, ... , <int32_t>, -1 ];
3251  learn_q= [ <int32_t>, ... , <int32_t>, -1 ];
3252 
3253 
3254  const_a=[ [<int32_t>,<int32_t>,<DOUBLE>];
3255  [<int32_t>,<int32_t>,<DOUBLE>];
3256  [<int32_t>,<int32_t>,<DOUBLE>];
3257  ........
3258  [<int32_t>,<int32_t>,<DOUBLE>];
3259  [-1,-1,-1];
3260  ];
3261 
3262  const_b=[ [<int32_t>,<int32_t>,<DOUBLE>];
3263  [<int32_t>,<int32_t>,<DOUBLE>];
3264  [<int32_t>,<int32_t>,<DOUBLE>];
3265  ........
3266  [<int32_t>,<int32_t>,<DOUBLE>];
3267  [-1,-1];
3268  ];
3269 
3270  const_p[]=[ [<int32_t>, <DOUBLE>], ... , [<int32_t>,<DOUBLE>], [-1,-1] ];
3271  const_q[]=[ [<int32_t>, <DOUBLE>], ... , [<int32_t>,<DOUBLE>], [-1,-1] ];
3272  */
3273 bool CHMM::load_definitions(FILE* file, bool verbose, bool _initialize)
3274 {
3275  if (model)
3276  delete model ;
3277  model=new Model();
3278 
3279  int32_t received_params=0x0000000; //a,b,p,q,N,M,O
3280  char buffer[1024];
3281 
3282  bool result=false;
3283  E_STATE state=INITIAL;
3284 
3285  { // do some useful initializations
3286  model->set_learn_a(0, -1);
3287  model->set_learn_a(1, -1);
3288  model->set_const_a(0, -1);
3289  model->set_const_a(1, -1);
3290  model->set_const_a_val(0, 1.0);
3291  model->set_learn_b(0, -1);
3292  model->set_const_b(0, -1);
3293  model->set_const_b_val(0, 1.0);
3294  model->set_learn_p(0, -1);
3295  model->set_learn_q(0, -1);
3296  model->set_const_p(0, -1);
3297  model->set_const_q(0, -1);
3298  } ;
3299 
3300  line=1;
3301 
3302  if (file)
3303  {
3304  while (state!=END)
3305  {
3306  int32_t value=fgetc(file);
3307 
3308  if (value=='\n')
3309  line++;
3310 
3311  if (value==EOF)
3312  state=END;
3313 
3314  switch (state)
3315  {
3316  case INITIAL:
3317  if (value=='l')
3318  {
3319  if (fgetc(file)=='e' && fgetc(file)=='a' && fgetc(file)=='r' && fgetc(file)=='n' && fgetc(file)=='_')
3320  {
3321  switch(fgetc(file))
3322  {
3323  case 'a':
3324  state=GET_learn_a;
3325  break;
3326  case 'b':
3327  state=GET_learn_b;
3328  break;
3329  case 'p':
3330  state=GET_learn_p;
3331  break;
3332  case 'q':
3333  state=GET_learn_q;
3334  break;
3335  default:
3336  error(line, "a,b,p or q expected in train definition file");
3337  };
3338  }
3339  }
3340  else if (value=='c')
3341  {
3342  if (fgetc(file)=='o' && fgetc(file)=='n' && fgetc(file)=='s'
3343  && fgetc(file)=='t' && fgetc(file)=='_')
3344  {
3345  switch(fgetc(file))
3346  {
3347  case 'a':
3348  state=GET_const_a;
3349  break;
3350  case 'b':
3351  state=GET_const_b;
3352  break;
3353  case 'p':
3354  state=GET_const_p;
3355  break;
3356  case 'q':
3357  state=GET_const_q;
3358  break;
3359  default:
3360  error(line, "a,b,p or q expected in train definition file");
3361  };
3362  }
3363  }
3364  else if (value=='%')
3365  {
3366  state=COMMENT;
3367  }
3368  else if (value==EOF)
3369  {
3370  state=END;
3371  }
3372  break;
3373  case GET_learn_a:
3374  if (value=='=')
3375  {
3376  open_bracket(file);
3377  bool finished=false;
3378  int32_t i=0;
3379 
3380  if (verbose)
3381  SG_DEBUG("\nlearn for transition matrix: ")
3382  while (!finished)
3383  {
3384  open_bracket(file);
3385 
3386  if (get_numbuffer(file, buffer, 4)) //get num
3387  {
3388  value=atoi(buffer);
3389  model->set_learn_a(i++, value);
3390 
3391  if (value<0)
3392  {
3393  finished=true;
3394  break;
3395  }
3396  if (value>=N)
3397  SG_ERROR("invalid value for learn_a(%i,0): %i\n",i/2,(int)value)
3398  }
3399  else
3400  break;
3401 
3402  comma_or_space(file);
3403 
3404  if (get_numbuffer(file, buffer, 4)) //get num
3405  {
3406  value=atoi(buffer);
3407  model->set_learn_a(i++, value);
3408 
3409  if (value<0)
3410  {
3411  finished=true;
3412  break;
3413  }
3414  if (value>=N)
3415  SG_ERROR("invalid value for learn_a(%i,1): %i\n",i/2-1,(int)value)
3416 
3417  }
3418  else
3419  break;
3420  close_bracket(file);
3421  }
3422  close_bracket(file);
3423  if (verbose)
3424  SG_DEBUG("%i Entries",(int)(i/2))
3425 
3426  if (finished)
3427  {
3428  received_params|=GOTlearn_a;
3429 
3430  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3431  }
3432  else
3433  state=END;
3434  }
3435  break;
3436  case GET_learn_b:
3437  if (value=='=')
3438  {
3439  open_bracket(file);
3440  bool finished=false;
3441  int32_t i=0;
3442 
3443  if (verbose)
3444  SG_DEBUG("\nlearn for emission matrix: ")
3445 
3446  while (!finished)
3447  {
3448  open_bracket(file);
3449 
3450  int32_t combine=0;
3451 
3452  for (int32_t j=0; j<2; j++)
3453  {
3454  if (get_numbuffer(file, buffer, 4)) //get num
3455  {
3456  value=atoi(buffer);
3457 
3458  if (j==0)
3459  {
3460  model->set_learn_b(i++, value);
3461 
3462  if (value<0)
3463  {
3464  finished=true;
3465  break;
3466  }
3467  if (value>=N)
3468  SG_ERROR("invalid value for learn_b(%i,0): %i\n",i/2,(int)value)
3469  }
3470  else
3471  combine=value;
3472  }
3473  else
3474  break;
3475 
3476  if (j<1)
3477  comma_or_space(file);
3478  else
3479  close_bracket(file);
3480  }
3481  model->set_learn_b(i++, combine);
3482  if (combine>=M)
3483 
3484  SG_ERROR("invalid value for learn_b(%i,1): %i\n",i/2-1,(int)value)
3485  }
3486  close_bracket(file);
3487  if (verbose)
3488  SG_DEBUG("%i Entries",(int)(i/2-1))
3489 
3490  if (finished)
3491  {
3492  received_params|=GOTlearn_b;
3493  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3494  }
3495  else
3496  state=END;
3497  }
3498  break;
3499  case GET_learn_p:
3500  if (value=='=')
3501  {
3502  open_bracket(file);
3503  bool finished=false;
3504  int32_t i=0;
3505 
3506  if (verbose)
3507  SG_DEBUG("\nlearn start states: ")
3508  while (!finished)
3509  {
3510  if (get_numbuffer(file, buffer, 4)) //get num
3511  {
3512  value=atoi(buffer);
3513 
3514  model->set_learn_p(i++, value);
3515 
3516  if (value<0)
3517  {
3518  finished=true;
3519  break;
3520  }
3521  if (value>=N)
3522  SG_ERROR("invalid value for learn_p(%i): %i\n",i-1,(int)value)
3523  }
3524  else
3525  break;
3526 
3527  comma_or_space(file);
3528  }
3529 
3530  close_bracket(file);
3531  if (verbose)
3532  SG_DEBUG("%i Entries",i-1)
3533 
3534  if (finished)
3535  {
3536  received_params|=GOTlearn_p;
3537  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3538  }
3539  else
3540  state=END;
3541  }
3542  break;
3543  case GET_learn_q:
3544  if (value=='=')
3545  {
3546  open_bracket(file);
3547  bool finished=false;
3548  int32_t i=0;
3549 
3550  if (verbose)
3551  SG_DEBUG("\nlearn terminal states: ")
3552  while (!finished)
3553  {
3554  if (get_numbuffer(file, buffer, 4)) //get num
3555  {
3556  value=atoi(buffer);
3557  model->set_learn_q(i++, value);
3558 
3559  if (value<0)
3560  {
3561  finished=true;
3562  break;
3563  }
3564  if (value>=N)
3565  SG_ERROR("invalid value for learn_q(%i): %i\n",i-1,(int)value)
3566  }
3567  else
3568  break;
3569 
3570  comma_or_space(file);
3571  }
3572 
3573  close_bracket(file);
3574  if (verbose)
3575  SG_DEBUG("%i Entries",i-1)
3576 
3577  if (finished)
3578  {
3579  received_params|=GOTlearn_q;
3580  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3581  }
3582  else
3583  state=END;
3584  }
3585  break;
3586  case GET_const_a:
3587  if (value=='=')
3588  {
3589  open_bracket(file);
3590  bool finished=false;
3591  int32_t i=0;
3592 
3593  if (verbose)
3594 #ifdef USE_HMMDEBUG
3595  SG_DEBUG("\nconst for transition matrix: \n")
3596 #else
3597  SG_DEBUG("\nconst for transition matrix: ")
3598 #endif
3599  while (!finished)
3600  {
3601  open_bracket(file);
3602 
3603  if (get_numbuffer(file, buffer, 4)) //get num
3604  {
3605  value=atoi(buffer);
3606  model->set_const_a(i++, value);
3607 
3608  if (value<0)
3609  {
3610  finished=true;
3611  model->set_const_a(i++, value);
3612  model->set_const_a_val((int32_t)i/2 - 1, value);
3613  break;
3614  }
3615  if (value>=N)
3616  SG_ERROR("invalid value for const_a(%i,0): %i\n",i/2,(int)value)
3617  }
3618  else
3619  break;
3620 
3621  comma_or_space(file);
3622 
3623  if (get_numbuffer(file, buffer, 4)) //get num
3624  {
3625  value=atoi(buffer);
3626  model->set_const_a(i++, value);
3627 
3628  if (value<0)
3629  {
3630  finished=true;
3631  model->set_const_a_val((int32_t)i/2 - 1, value);
3632  break;
3633  }
3634  if (value>=N)
3635  SG_ERROR("invalid value for const_a(%i,1): %i\n",i/2-1,(int)value)
3636  }
3637  else
3638  break;
3639 
3640  if (!comma_or_space(file))
3641  model->set_const_a_val((int32_t)i/2 - 1, 1.0);
3642  else
3643  if (get_numbuffer(file, buffer, 10)) //get num
3644  {
3645  float64_t dvalue=atof(buffer);
3646  model->set_const_a_val((int32_t)i/2 - 1, dvalue);
3647  if (dvalue<0)
3648  {
3649  finished=true;
3650  break;
3651  }
3652  if ((dvalue>1.0) || (dvalue<0.0))
3653  SG_ERROR("invalid value for const_a_val(%i): %e\n",(int)i/2-1,dvalue)
3654  }
3655  else
3656  model->set_const_a_val((int32_t)i/2 - 1, 1.0);
3657 
3658 #ifdef USE_HMMDEBUG
3659  if (verbose)
3660  SG_ERROR("const_a(%i,%i)=%e\n", model->get_const_a((int32_t)i/2-1,0),model->get_const_a((int32_t)i/2-1,1),model->get_const_a_val((int32_t)i/2-1))
3661 #endif
3662  close_bracket(file);
3663  }
3664  close_bracket(file);
3665  if (verbose)
3666  SG_DEBUG("%i Entries",(int)i/2-1)
3667 
3668  if (finished)
3669  {
3670  received_params|=GOTconst_a;
3671  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3672  }
3673  else
3674  state=END;
3675  }
3676  break;
3677 
3678  case GET_const_b:
3679  if (value=='=')
3680  {
3681  open_bracket(file);
3682  bool finished=false;
3683  int32_t i=0;
3684 
3685  if (verbose)
3686 #ifdef USE_HMMDEBUG
3687  SG_DEBUG("\nconst for emission matrix: \n")
3688 #else
3689  SG_DEBUG("\nconst for emission matrix: ")
3690 #endif
3691  while (!finished)
3692  {
3693  open_bracket(file);
3694  int32_t combine=0;
3695  for (int32_t j=0; j<3; j++)
3696  {
3697  if (get_numbuffer(file, buffer, 10)) //get num
3698  {
3699  if (j==0)
3700  {
3701  value=atoi(buffer);
3702 
3703  model->set_const_b(i++, value);
3704 
3705  if (value<0)
3706  {
3707  finished=true;
3708  //model->set_const_b_val((int32_t)(i-1)/2, value);
3709  break;
3710  }
3711  if (value>=N)
3712  SG_ERROR("invalid value for const_b(%i,0): %i\n",i/2-1,(int)value)
3713  }
3714  else if (j==2)
3715  {
3716  float64_t dvalue=atof(buffer);
3717  model->set_const_b_val((int32_t)(i-1)/2, dvalue);
3718  if (dvalue<0)
3719  {
3720  finished=true;
3721  break;
3722  } ;
3723  if ((dvalue>1.0) || (dvalue<0.0))
3724  SG_ERROR("invalid value for const_b_val(%i,1): %e\n",i/2-1,dvalue)
3725  }
3726  else
3727  {
3728  value=atoi(buffer);
3729  combine= value;
3730  } ;
3731  }
3732  else
3733  {
3734  if (j==2)
3735  model->set_const_b_val((int32_t)(i-1)/2, 1.0);
3736  break;
3737  } ;
3738  if (j<2)
3739  if ((!comma_or_space(file)) && (j==1))
3740  {
3741  model->set_const_b_val((int32_t)(i-1)/2, 1.0) ;
3742  break ;
3743  } ;
3744  }
3745  close_bracket(file);
3746  model->set_const_b(i++, combine);
3747  if (combine>=M)
3748  SG_ERROR("invalid value for const_b(%i,1): %i\n",i/2-1, combine)
3749 #ifdef USE_HMMDEBUG
3750  if (verbose && !finished)
3751  SG_ERROR("const_b(%i,%i)=%e\n", model->get_const_b((int32_t)i/2-1,0),model->get_const_b((int32_t)i/2-1,1),model->get_const_b_val((int32_t)i/2-1))
3752 #endif
3753  }
3754  close_bracket(file);
3755  if (verbose)
3756  SG_ERROR("%i Entries",(int)i/2-1)
3757 
3758  if (finished)
3759  {
3760  received_params|=GOTconst_b;
3761  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3762  }
3763  else
3764  state=END;
3765  }
3766  break;
3767  case GET_const_p:
3768  if (value=='=')
3769  {
3770  open_bracket(file);
3771  bool finished=false;
3772  int32_t i=0;
3773 
3774  if (verbose)
3775 #ifdef USE_HMMDEBUG
3776  SG_DEBUG("\nconst for start states: \n")
3777 #else
3778  SG_DEBUG("\nconst for start states: ")
3779 #endif
3780  while (!finished)
3781  {
3782  open_bracket(file);
3783 
3784  if (get_numbuffer(file, buffer, 4)) //get num
3785  {
3786  value=atoi(buffer);
3787  model->set_const_p(i, value);
3788 
3789  if (value<0)
3790  {
3791  finished=true;
3792  model->set_const_p_val(i++, value);
3793  break;
3794  }
3795  if (value>=N)
3796  SG_ERROR("invalid value for const_p(%i): %i\n",i,(int)value)
3797 
3798  }
3799  else
3800  break;
3801 
3802  if (!comma_or_space(file))
3803  model->set_const_p_val(i++, 1.0);
3804  else
3805  if (get_numbuffer(file, buffer, 10)) //get num
3806  {
3807  float64_t dvalue=atof(buffer);
3808  model->set_const_p_val(i++, dvalue);
3809  if (dvalue<0)
3810  {
3811  finished=true;
3812  break;
3813  }
3814  if ((dvalue>1) || (dvalue<0))
3815  SG_ERROR("invalid value for const_p_val(%i): %e\n",i,dvalue)
3816  }
3817  else
3818  model->set_const_p_val(i++, 1.0);
3819 
3820  close_bracket(file);
3821 
3822 #ifdef USE_HMMDEBUG
3823  if (verbose)
3824  SG_DEBUG("const_p(%i)=%e\n", model->get_const_p(i-1),model->get_const_p_val(i-1))
3825 #endif
3826  }
3827  if (verbose)
3828  SG_DEBUG("%i Entries",i-1)
3829 
3830  close_bracket(file);
3831 
3832  if (finished)
3833  {
3834  received_params|=GOTconst_p;
3835  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3836  }
3837  else
3838  state=END;
3839  }
3840  break;
3841  case GET_const_q:
3842  if (value=='=')
3843  {
3844  open_bracket(file);
3845  bool finished=false;
3846  if (verbose)
3847 #ifdef USE_HMMDEBUG
3848  SG_DEBUG("\nconst for terminal states: \n")
3849 #else
3850  SG_DEBUG("\nconst for terminal states: ")
3851 #endif
3852  int32_t i=0;
3853 
3854  while (!finished)
3855  {
3856  open_bracket(file) ;
3857  if (get_numbuffer(file, buffer, 4)) //get num
3858  {
3859  value=atoi(buffer);
3860  model->set_const_q(i, value);
3861  if (value<0)
3862  {
3863  finished=true;
3864  model->set_const_q_val(i++, value);
3865  break;
3866  }
3867  if (value>=N)
3868  SG_ERROR("invalid value for const_q(%i): %i\n",i,(int)value)
3869  }
3870  else
3871  break;
3872 
3873  if (!comma_or_space(file))
3874  model->set_const_q_val(i++, 1.0);
3875  else
3876  if (get_numbuffer(file, buffer, 10)) //get num
3877  {
3878  float64_t dvalue=atof(buffer);
3879  model->set_const_q_val(i++, dvalue);
3880  if (dvalue<0)
3881  {
3882  finished=true;
3883  break;
3884  }
3885  if ((dvalue>1) || (dvalue<0))
3886  SG_ERROR("invalid value for const_q_val(%i): %e\n",i,(double) dvalue)
3887  }
3888  else
3889  model->set_const_q_val(i++, 1.0);
3890 
3891  close_bracket(file);
3892 #ifdef USE_HMMDEBUG
3893  if (verbose)
3894  SG_DEBUG("const_q(%i)=%e\n", model->get_const_q(i-1),model->get_const_q_val(i-1))
3895 #endif
3896  }
3897  if (verbose)
3898  SG_DEBUG("%i Entries",i-1)
3899 
3900  close_bracket(file);
3901 
3902  if (finished)
3903  {
3904  received_params|=GOTconst_q;
3905  state= (received_params == (GOTlearn_a | GOTlearn_b | GOTlearn_p | GOTlearn_q |GOTconst_a | GOTconst_b | GOTconst_p | GOTconst_q)) ? END : INITIAL;
3906  }
3907  else
3908  state=END;
3909  }
3910  break;
3911  case COMMENT:
3912  if (value==EOF)
3913  state=END;
3914  else if (value=='\n')
3915  state=INITIAL;
3916  break;
3917 
3918  default:
3919  break;
3920  }
3921  }
3922  }
3923 
3924  /*result=((received_params&(GOTlearn_a | GOTconst_a))!=0) ;
3925  result=result && ((received_params&(GOTlearn_b | GOTconst_b))!=0) ;
3926  result=result && ((received_params&(GOTlearn_p | GOTconst_p))!=0) ;
3927  result=result && ((received_params&(GOTlearn_q | GOTconst_q))!=0) ; */
3928  result=1 ;
3929  if (result)
3930  {
3931  model->sort_learn_a() ;
3932  model->sort_learn_b() ;
3933  if (_initialize)
3934  {
3935  init_model_defined(); ;
3936  convert_to_log();
3937  } ;
3938  }
3939  if (verbose)
3940  SG_DEBUG("\n")
3941  return result;
3942 }
3943 
3944 /*
3945  -format specs: model_file (model.hmm)
3946  % HMM - specification
3947  % N - number of states
3948  % M - number of observation_tokens
3949  % a is state_transition_matrix
3950  % size(a)= [N,N]
3951  %
3952  % b is observation_per_state_matrix
3953  % size(b)= [N,M]
3954  %
3955  % p is initial distribution
3956  % size(p)= [1, N]
3957 
3958  N=<int32_t>;
3959  M=<int32_t>;
3960 
3961  p=[<DOUBLE>,<DOUBLE>...<DOUBLE>];
3962 
3963  a=[ [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3964  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3965  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3966  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3967  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3968  ];
3969 
3970  b=[ [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3971  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3972  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3973  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3974  [<DOUBLE>,<DOUBLE>...<DOUBLE>];
3975  ];
3976  */
3977 
3978 bool CHMM::save_model(FILE* file)
3979 {
3980  bool result=false;
3981  int32_t i,j;
3982  const float32_t NAN_REPLACEMENT = (float32_t) CMath::ALMOST_NEG_INFTY ;
3983 
3984  if (file)
3985  {
3986  fprintf(file,"%s","% HMM - specification\n% N - number of states\n% M - number of observation_tokens\n% a is state_transition_matrix\n% size(a)= [N,N]\n%\n% b is observation_per_state_matrix\n% size(b)= [N,M]\n%\n% p is initial distribution\n% size(p)= [1, N]\n\n% q is distribution of end states\n% size(q)= [1, N]\n");
3987  fprintf(file,"N=%d;\n",N);
3988  fprintf(file,"M=%d;\n",M);
3989 
3990  fprintf(file,"p=[");
3991  for (i=0; i<N; i++)
3992  {
3993  if (i<N-1) {
3994  if (CMath::is_finite(get_p(i)))
3995  fprintf(file, "%e,", (double)get_p(i));
3996  else
3997  fprintf(file, "%f,", NAN_REPLACEMENT);
3998  }
3999  else {
4000  if (CMath::is_finite(get_p(i)))
4001  fprintf(file, "%e", (double)get_p(i));
4002  else
4003  fprintf(file, "%f", NAN_REPLACEMENT);
4004  }
4005  }
4006 
4007  fprintf(file,"];\n\nq=[");
4008  for (i=0; i<N; i++)
4009  {
4010  if (i<N-1) {
4011  if (CMath::is_finite(get_q(i)))
4012  fprintf(file, "%e,", (double)get_q(i));
4013  else
4014  fprintf(file, "%f,", NAN_REPLACEMENT);
4015  }
4016  else {
4017  if (CMath::is_finite(get_q(i)))
4018  fprintf(file, "%e", (double)get_q(i));
4019  else
4020  fprintf(file, "%f", NAN_REPLACEMENT);
4021  }
4022  }
4023  fprintf(file,"];\n\na=[");
4024 
4025  for (i=0; i<N; i++)
4026  {
4027  fprintf(file, "\t[");
4028 
4029  for (j=0; j<N; j++)
4030  {
4031  if (j<N-1) {
4032  if (CMath::is_finite(get_a(i,j)))
4033  fprintf(file, "%e,", (double)get_a(i,j));
4034  else
4035  fprintf(file, "%f,", NAN_REPLACEMENT);
4036  }
4037  else {
4038  if (CMath::is_finite(get_a(i,j)))
4039  fprintf(file, "%e];\n", (double)get_a(i,j));
4040  else
4041  fprintf(file, "%f];\n", NAN_REPLACEMENT);
4042  }
4043  }
4044  }
4045 
4046  fprintf(file," ];\n\nb=[");
4047 
4048  for (i=0; i<N; i++)
4049  {
4050  fprintf(file, "\t[");
4051 
4052  for (j=0; j<M; j++)
4053  {
4054  if (j<M-1) {
4055  if (CMath::is_finite(get_b(i,j)))
4056  fprintf(file, "%e,", (double)get_b(i,j));
4057  else
4058  fprintf(file, "%f,", NAN_REPLACEMENT);
4059  }
4060  else {
4061  if (CMath::is_finite(get_b(i,j)))
4062  fprintf(file, "%e];\n", (double)get_b(i,j));
4063  else
4064  fprintf(file, "%f];\n", NAN_REPLACEMENT);
4065  }
4066  }
4067  }
4068  result= (fprintf(file," ];\n") == 5);
4069  }
4070 
4071  return result;
4072 }
4073 
4074 T_STATES* CHMM::get_path(int32_t dim, float64_t& prob)
4075 {
4076  T_STATES* result = NULL;
4077 
4078  prob = best_path(dim);
4079  result = SG_MALLOC(T_STATES, p_observations->get_vector_length(dim));
4080 
4081  for (int32_t i=0; i<p_observations->get_vector_length(dim); i++)
4082  result[i]=PATH(dim)[i];
4083 
4084  return result;
4085 }
4086 
4087 bool CHMM::save_path(FILE* file)
4088 {
4089  bool result=false;
4090 
4091  if (file)
4092  {
4093  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4094  {
4095  if (dim%100==0)
4096  SG_PRINT("%i..", dim)
4097  float64_t prob = best_path(dim);
4098  fprintf(file,"%i. path probability:%e\nstate sequence:\n", dim, prob);
4099  for (int32_t i=0; i<p_observations->get_vector_length(dim)-1; i++)
4100  fprintf(file,"%d ", PATH(dim)[i]);
4101  fprintf(file,"%d", PATH(dim)[p_observations->get_vector_length(dim)-1]);
4102  fprintf(file,"\n\n") ;
4103  }
4104  SG_DONE()
4105  result=true;
4106  }
4107 
4108  return result;
4109 }
4110 
4112 {
4113  bool result=false;
4114 
4115  if (file)
4116  {
4117  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4118  {
4119  float32_t prob= (float32_t) model_probability(dim);
4120  fwrite(&prob, sizeof(float32_t), 1, file);
4121  }
4122  result=true;
4123  }
4124 
4125  return result;
4126 }
4127 
4128 bool CHMM::save_likelihood(FILE* file)
4129 {
4130  bool result=false;
4131 
4132  if (file)
4133  {
4134  fprintf(file, "%% likelihood of model per observation\n%% P[O|model]=[ P[O|model]_1 P[O|model]_2 ... P[O|model]_dim ]\n%%\n");
4135 
4136  fprintf(file, "P=[");
4137  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4138  fprintf(file, "%e ", (double) model_probability(dim));
4139 
4140  fprintf(file,"];");
4141  result=true;
4142  }
4143 
4144  return result;
4145 }
4146 
4147 #define FLOATWRITE(file, value) { float32_t rrr=float32_t(value); fwrite(&rrr, sizeof(float32_t), 1, file); num_floats++;}
4148 
bool CHMM::save_model_bin(FILE* file)
{
	// Dump the model parameters to `file` as raw float32 values via the
	// FLOATWRITE macro. Two layouts exist:
	//  - no `model` definitions loaded: format id 1, then all of p, q, a, b;
	//  - `model` loaded: format id 2, then only the learnable parameters
	//    (with their (i,j) indices for a and b).
	// Both layouts end with an INFTY marker, id 3, and six counts
	// (num_p, num_q, num_a, num_b, N, M).
	// Always returns true; writing silently does nothing when file is NULL.
	int32_t i,j,q, num_floats=0 ;
	if (!model)
	{
		if (file)
		{
			// write id
			// NOTE(review): sibling branches write FLOATWRITE(file,
			// (float32_t)CMath::INFTY) immediately before the id value; that
			// line appears to be missing here in this view — verify against
			// the repository before relying on the on-disk layout.
			FLOATWRITE(file, (float32_t) 1);

			//derivates log(dp),log(dq)
			for (i=0; i<N; i++)
				FLOATWRITE(file, get_p(i));
			SG_INFO("wrote %i parameters for p\n",N)

			for (i=0; i<N; i++)
				FLOATWRITE(file, get_q(i)) ;
			SG_INFO("wrote %i parameters for q\n",N)

			//derivates log(da),log(db)
			for (i=0; i<N; i++)
				for (j=0; j<N; j++)
					FLOATWRITE(file, get_a(i,j));
			SG_INFO("wrote %i parameters for a\n",N*N)

			for (i=0; i<N; i++)
				for (j=0; j<M; j++)
					FLOATWRITE(file, get_b(i,j));
			SG_INFO("wrote %i parameters for b\n",N*M)

			// write id (trailer marker + format id 3)
			FLOATWRITE(file, (float32_t)CMath::INFTY);
			FLOATWRITE(file, (float32_t) 3);

			// write number of parameters
			FLOATWRITE(file, (float32_t) N);
			FLOATWRITE(file, (float32_t) N);
			FLOATWRITE(file, (float32_t) N*N);
			FLOATWRITE(file, (float32_t) N*M);
			FLOATWRITE(file, (float32_t) N);
			FLOATWRITE(file, (float32_t) M);
		} ;
	}
	else
	{
		if (file)
		{
			int32_t num_p, num_q, num_a, num_b ;
			// write id
			// NOTE(review): as above, an INFTY marker write preceding this id
			// appears to be missing from this view — confirm upstream.
			FLOATWRITE(file, (float32_t) 2);

			// learn_p/learn_q lists are terminated by a negative index
			for (i=0; model->get_learn_p(i)>=0; i++)
				FLOATWRITE(file, get_p(model->get_learn_p(i)));
			num_p=i ;
			SG_INFO("wrote %i parameters for p\n",num_p)

			for (i=0; model->get_learn_q(i)>=0; i++)
				FLOATWRITE(file, get_q(model->get_learn_q(i)));
			num_q=i ;
			SG_INFO("wrote %i parameters for q\n",num_q)

			//derivates log(da),log(db)
			// each learnable transition is written as (i, j, a(i,j))
			for (q=0; model->get_learn_a(q,1)>=0; q++)
			{
				i=model->get_learn_a(q,0) ;
				j=model->get_learn_a(q,1) ;
				FLOATWRITE(file, (float32_t)i);
				FLOATWRITE(file, (float32_t)j);
				FLOATWRITE(file, get_a(i,j));
			} ;
			num_a=q ;
			SG_INFO("wrote %i parameters for a\n",num_a)

			// each learnable emission is written as (i, j, b(i,j))
			for (q=0; model->get_learn_b(q,0)>=0; q++)
			{
				i=model->get_learn_b(q,0) ;
				j=model->get_learn_b(q,1) ;
				FLOATWRITE(file, (float32_t)i);
				FLOATWRITE(file, (float32_t)j);
				FLOATWRITE(file, get_b(i,j));
			} ;
			num_b=q ;
			SG_INFO("wrote %i parameters for b\n",num_b)

			// write id (trailer marker + format id 3)
			FLOATWRITE(file, (float32_t)CMath::INFTY);
			FLOATWRITE(file, (float32_t) 3);

			// write number of parameters
			FLOATWRITE(file, (float32_t) num_p);
			FLOATWRITE(file, (float32_t) num_q);
			FLOATWRITE(file, (float32_t) num_a);
			FLOATWRITE(file, (float32_t) num_b);
			FLOATWRITE(file, (float32_t) N);
			FLOATWRITE(file, (float32_t) M);
		} ;
	} ;
	return true ;
}
4250 
4251 bool CHMM::save_path_derivatives(FILE* logfile)
4252 {
4253  int32_t dim,i,j;
4254 
4255  if (logfile)
4256  {
4257  fprintf(logfile,"%% lambda denotes the model\n%% O denotes the observation sequence\n%% Q denotes the path\n%% \n%% calculating derivatives of P[O,Q|lambda]=p_{Q1}b_{Q1}(O_1}*a_{Q1}{Q2}b_{Q2}(O2)*...*q_{T-1}{T}b_{QT}(O_T}q_{Q_T} against p,q,a,b\n%%\n");
4258  fprintf(logfile,"%% dPr[...]=[ [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_NN]\n");
4259  fprintf(logfile,"%% [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_NN]\n");
4260  fprintf(logfile,"%% ............................. \n");
4261  fprintf(logfile,"%% [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_MM]\n");
4262  fprintf(logfile,"%% ];\n%%\n\ndPr(log()) = [\n");
4263  }
4264  else
4265  return false ;
4266 
4267  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
4268  {
4269  best_path(dim);
4270 
4271  fprintf(logfile, "[ ");
4272 
4273  //derivates dlogp,dlogq
4274  for (i=0; i<N; i++)
4275  fprintf(logfile,"%e, ", (double) path_derivative_p(i,dim) );
4276 
4277  for (i=0; i<N; i++)
4278  fprintf(logfile,"%e, ", (double) path_derivative_q(i,dim) );
4279 
4280  //derivates dloga,dlogb
4281  for (i=0; i<N; i++)
4282  for (j=0; j<N; j++)
4283  fprintf(logfile, "%e,", (double) path_derivative_a(i,j,dim) );
4284 
4285  for (i=0; i<N; i++)
4286  for (j=0; j<M; j++)
4287  fprintf(logfile, "%e,", (double) path_derivative_b(i,j,dim) );
4288 
4289  fseek(logfile,ftell(logfile)-1,SEEK_SET);
4290  fprintf(logfile, " ];\n");
4291  }
4292 
4293  fprintf(logfile, "];");
4294 
4295  return true ;
4296 }
4297 
4299 {
4300  bool result=false;
4301  int32_t dim,i,j,q;
4302  float64_t prob=0 ;
4303  int32_t num_floats=0 ;
4304 
4305  float64_t sum_prob=0.0 ;
4306  if (!model)
4307  SG_WARNING("No definitions loaded -- writing derivatives of all weights\n")
4308  else
4309  SG_INFO("writing derivatives of changed weights only\n")
4310 
4311  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
4312  {
4313  if (dim%100==0)
4314  {
4315  SG_PRINT(".")
4316 
4317  } ;
4318 
4319  prob=best_path(dim);
4320  sum_prob+=prob ;
4321 
4322  if (!model)
4323  {
4324  if (logfile)
4325  {
4326  // write prob
4327  FLOATWRITE(logfile, prob);
4328 
4329  for (i=0; i<N; i++)
4330  FLOATWRITE(logfile, path_derivative_p(i,dim));
4331 
4332  for (i=0; i<N; i++)
4333  FLOATWRITE(logfile, path_derivative_q(i,dim));
4334 
4335  for (i=0; i<N; i++)
4336  for (j=0; j<N; j++)
4337  FLOATWRITE(logfile, path_derivative_a(i,j,dim));
4338 
4339  for (i=0; i<N; i++)
4340  for (j=0; j<M; j++)
4341  FLOATWRITE(logfile, path_derivative_b(i,j,dim));
4342 
4343  }
4344  }
4345  else
4346  {
4347  if (logfile)
4348  {
4349  // write prob
4350  FLOATWRITE(logfile, prob);
4351 
4352  for (i=0; model->get_learn_p(i)>=0; i++)
4353  FLOATWRITE(logfile, path_derivative_p(model->get_learn_p(i),dim));
4354 
4355  for (i=0; model->get_learn_q(i)>=0; i++)
4356  FLOATWRITE(logfile, path_derivative_q(model->get_learn_q(i),dim));
4357 
4358  for (q=0; model->get_learn_a(q,0)>=0; q++)
4359  {
4360  i=model->get_learn_a(q,0) ;
4361  j=model->get_learn_a(q,1) ;
4362  FLOATWRITE(logfile, path_derivative_a(i,j,dim));
4363  } ;
4364 
4365  for (q=0; model->get_learn_b(q,0)>=0; q++)
4366  {
4367  i=model->get_learn_b(q,0) ;
4368  j=model->get_learn_b(q,1) ;
4369  FLOATWRITE(logfile, path_derivative_b(i,j,dim));
4370  } ;
4371  }
4372  } ;
4373  }
4374  save_model_bin(logfile) ;
4375 
4376  result=true;
4377  SG_PRINT("\n")
4378  return result;
4379 }
4380 
4382 {
4383  bool result=false;
4384  int32_t dim,i,j,q ;
4385  int32_t num_floats=0 ;
4386 
4387  if (!model)
4388  SG_WARNING("No definitions loaded -- writing derivatives of all weights\n")
4389  else
4390  SG_INFO("writing derivatives of changed weights only\n")
4391 
4392 #ifdef USE_HMMPARALLEL
4393  int32_t num_threads = parallel->get_num_threads();
4394  pthread_t *threads=SG_MALLOC(pthread_t, num_threads);
4395  S_DIM_THREAD_PARAM *params=SG_MALLOC(S_DIM_THREAD_PARAM, num_threads);
4396 
4397  if (p_observations->get_num_vectors()<num_threads)
4398  num_threads=p_observations->get_num_vectors();
4399 #endif
4400 
4401  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
4402  {
4403  if (dim%20==0)
4404  {
4405  SG_PRINT(".")
4406 
4407  } ;
4408 
4409 #ifdef USE_HMMPARALLEL
4410  if (dim%num_threads==0)
4411  {
4412  for (i=0; i<num_threads; i++)
4413  {
4414  if (dim+i<p_observations->get_num_vectors())
4415  {
4416  params[i].hmm=this ;
4417  params[i].dim=dim+i ;
4418  pthread_create(&threads[i], NULL, bw_dim_prefetch, (void*)&params[i]) ;
4419  }
4420  }
4421 
4422  for (i=0; i<num_threads; i++)
4423  {
4424  if (dim+i<p_observations->get_num_vectors())
4425  pthread_join(threads[i], NULL);
4426  }
4427  }
4428 #endif
4429 
4430  float64_t prob=model_probability(dim) ;
4431  if (!model)
4432  {
4433  if (file)
4434  {
4435  // write prob
4436  FLOATWRITE(file, prob);
4437 
4438  //derivates log(dp),log(dq)
4439  for (i=0; i<N; i++)
4440  FLOATWRITE(file, model_derivative_p(i,dim));
4441 
4442  for (i=0; i<N; i++)
4443  FLOATWRITE(file, model_derivative_q(i,dim));
4444 
4445  //derivates log(da),log(db)
4446  for (i=0; i<N; i++)
4447  for (j=0; j<N; j++)
4448  FLOATWRITE(file, model_derivative_a(i,j,dim));
4449 
4450  for (i=0; i<N; i++)
4451  for (j=0; j<M; j++)
4452  FLOATWRITE(file, model_derivative_b(i,j,dim));
4453 
4454  if (dim==0)
4455  SG_INFO("Number of parameters (including posterior prob.): %i\n", num_floats)
4456  } ;
4457  }
4458  else
4459  {
4460  if (file)
4461  {
4462  // write prob
4463  FLOATWRITE(file, prob);
4464 
4465  for (i=0; model->get_learn_p(i)>=0; i++)
4467 
4468  for (i=0; model->get_learn_q(i)>=0; i++)
4470 
4471  //derivates log(da),log(db)
4472  for (q=0; model->get_learn_a(q,1)>=0; q++)
4473  {
4474  i=model->get_learn_a(q,0) ;
4475  j=model->get_learn_a(q,1) ;
4476  FLOATWRITE(file, model_derivative_a(i,j,dim));
4477  } ;
4478 
4479  for (q=0; model->get_learn_b(q,0)>=0; q++)
4480  {
4481  i=model->get_learn_b(q,0) ;
4482  j=model->get_learn_b(q,1) ;
4483  FLOATWRITE(file, model_derivative_b(i,j,dim));
4484  } ;
4485  if (dim==0)
4486  SG_INFO("Number of parameters (including posterior prob.): %i\n", num_floats)
4487  } ;
4488  } ;
4489  }
4490  save_model_bin(file) ;
4491 
4492 #ifdef USE_HMMPARALLEL
4493  SG_FREE(threads);
4494  SG_FREE(params);
4495 #endif
4496 
4497  result=true;
4498  SG_PRINT("\n")
4499  return result;
4500 }
4501 
4503 {
4504  bool result=false;
4505  int32_t dim,i,j;
4506 
4507  if (file)
4508  {
4509 
4510  fprintf(file,"%% lambda denotes the model\n%% O denotes the observation sequence\n%% Q denotes the path\n%%\n%% calculating derivatives of P[O|lambda]=sum_{all Q}p_{Q1}b_{Q1}(O_1}*a_{Q1}{Q2}b_{Q2}(O2)*...*q_{T-1}{T}b_{QT}(O_T}q_{Q_T} against p,q,a,b\n%%\n");
4511  fprintf(file,"%% dPr[...]=[ [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_NN]\n");
4512  fprintf(file,"%% [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_NN]\n");
4513  fprintf(file,"%% ............................. \n");
4514  fprintf(file,"%% [dp_1,...,dp_N,dq_1,...,dq_N, da_11,da_12,..,da_1N,..,da_NN, db_11,.., db_MM]\n");
4515  fprintf(file,"%% ];\n%%\n\nlog(dPr) = [\n");
4516 
4517 
4518  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
4519  {
4520  fprintf(file, "[ ");
4521 
4522  //derivates log(dp),log(dq)
4523  for (i=0; i<N; i++)
4524  fprintf(file,"%e, ", (double) model_derivative_p(i, dim) ); //log (dp)
4525 
4526  for (i=0; i<N; i++)
4527  fprintf(file,"%e, ", (double) model_derivative_q(i, dim) ); //log (dq)
4528 
4529  //derivates log(da),log(db)
4530  for (i=0; i<N; i++)
4531  for (j=0; j<N; j++)
4532  fprintf(file, "%e,", (double) model_derivative_a(i,j,dim) );
4533 
4534  for (i=0; i<N; i++)
4535  for (j=0; j<M; j++)
4536  fprintf(file, "%e,", (double) model_derivative_b(i,j,dim) );
4537 
4538  fseek(file,ftell(file)-1,SEEK_SET);
4539  fprintf(file, " ];\n");
4540  }
4541 
4542 
4543  fprintf(file, "];");
4544 
4545  result=true;
4546  }
4547  return result;
4548 }
4549 
4551 {
4552  // bool result=false;
4553  const float64_t delta=5e-4 ;
4554 
4555  int32_t i ;
4556  //derivates log(da)
4557  /* for (i=0; i<N; i++)
4558  {
4559  for (int32_t j=0; j<N; j++)
4560  {
4561  float64_t old_a=get_a(i,j) ;
4562 
4563  set_a(i,j, log(exp(old_a)-delta)) ;
4564  invalidate_model() ;
4565  float64_t prob_old=exp(model_probability(-1)*p_observations->get_num_vectors()) ;
4566 
4567  set_a(i,j, log(exp(old_a)+delta)) ;
4568  invalidate_model() ;
4569  float64_t prob_new=exp(model_probability(-1)*p_observations->get_num_vectors());
4570 
4571  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4572 
4573  set_a(i,j, old_a) ;
4574  invalidate_model() ;
4575 
4576  float64_t prod_prob=model_probability(-1)*p_observations->get_num_vectors() ;
4577 
4578  float64_t deriv_calc=0 ;
4579  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4580  deriv_calc+=exp(model_derivative_a(i, j, dim)+
4581  prod_prob-model_probability(dim)) ;
4582 
4583  SG_DEBUG("da(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc)
4584  } ;
4585  } ;*/
4586  //derivates log(db)
4587  i=0;//for (i=0; i<N; i++)
4588  {
4589  for (int32_t j=0; j<M; j++)
4590  {
4591  float64_t old_b=get_b(i,j) ;
4592 
4593  set_b(i,j, log(exp(old_b)-delta)) ;
4594  invalidate_model() ;
4596 
4597  set_b(i,j, log(exp(old_b)+delta)) ;
4598  invalidate_model() ;
4600 
4601  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4602 
4603  set_b(i,j, old_b) ;
4604  invalidate_model() ;
4605 
4606  float64_t deriv_calc=0 ;
4607  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4608  {
4609  deriv_calc+=exp(model_derivative_b(i, j, dim)-model_probability(dim)) ;
4610  if (j==1)
4611  SG_INFO("deriv_calc[%i]=%e\n",dim,deriv_calc)
4612  } ;
4613 
4614  SG_ERROR("b(%i,%i)=%e db(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j,exp(old_b),i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc)
4615  } ;
4616  } ;
4617  return true ;
4618 }
4619 
4621 {
4622  bool result=false;
4623  const float64_t delta=3e-4 ;
4624 
4625  for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
4626  {
4627  int32_t i ;
4628  //derivates log(dp),log(dq)
4629  for (i=0; i<N; i++)
4630  {
4631  for (int32_t j=0; j<N; j++)
4632  {
4633  float64_t old_a=get_a(i,j) ;
4634 
4635  set_a(i,j, log(exp(old_a)-delta)) ;
4636  invalidate_model() ;
4637  float64_t prob_old=exp(model_probability(dim)) ;
4638 
4639  set_a(i,j, log(exp(old_a)+delta)) ;
4640  invalidate_model() ;
4641  float64_t prob_new=exp(model_probability(dim));
4642 
4643  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4644 
4645  set_a(i,j, old_a) ;
4646  invalidate_model() ;
4647  float64_t deriv_calc=exp(model_derivative_a(i, j, dim)) ;
4648 
4649  SG_DEBUG("da(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc)
4650  invalidate_model() ;
4651  } ;
4652  } ;
4653  for (i=0; i<N; i++)
4654  {
4655  for (int32_t j=0; j<M; j++)
4656  {
4657  float64_t old_b=get_b(i,j) ;
4658 
4659  set_b(i,j, log(exp(old_b)-delta)) ;
4660  invalidate_model() ;
4661  float64_t prob_old=exp(model_probability(dim)) ;
4662 
4663  set_b(i,j, log(exp(old_b)+delta)) ;
4664  invalidate_model() ;
4665  float64_t prob_new=exp(model_probability(dim));
4666 
4667  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4668 
4669  set_b(i,j, old_b) ;
4670  invalidate_model() ;
4671  float64_t deriv_calc=exp(model_derivative_b(i, j, dim));
4672 
4673  SG_DEBUG("db(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/(deriv_calc))
4674  } ;
4675  } ;
4676 
4677 #ifdef TEST
4678  for (i=0; i<N; i++)
4679  {
4680  float64_t old_p=get_p(i) ;
4681 
4682  set_p(i, log(exp(old_p)-delta)) ;
4683  invalidate_model() ;
4684  float64_t prob_old=exp(model_probability(dim)) ;
4685 
4686  set_p(i, log(exp(old_p)+delta)) ;
4687  invalidate_model() ;
4688  float64_t prob_new=exp(model_probability(dim));
4689  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4690 
4691  set_p(i, old_p) ;
4692  invalidate_model() ;
4693  float64_t deriv_calc=exp(model_derivative_p(i, dim));
4694 
4695  //if (fabs(deriv_calc_old-deriv)>1e-4)
4696  SG_DEBUG("dp(%i) = %e:%e\t (%1.5f%%)\n", i, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc)
4697  } ;
4698  for (i=0; i<N; i++)
4699  {
4700  float64_t old_q=get_q(i) ;
4701 
4702  set_q(i, log(exp(old_q)-delta)) ;
4703  invalidate_model() ;
4704  float64_t prob_old=exp(model_probability(dim)) ;
4705 
4706  set_q(i, log(exp(old_q)+delta)) ;
4707  invalidate_model() ;
4708  float64_t prob_new=exp(model_probability(dim));
4709 
4710  float64_t deriv = (prob_new-prob_old)/(2*delta) ;
4711 
4712  set_q(i, old_q) ;
4713  invalidate_model() ;
4714  float64_t deriv_calc=exp(model_derivative_q(i, dim));
4715 
4716  //if (fabs(deriv_calc_old-deriv)>1e-4)
4717  SG_DEBUG("dq(%i) = %e:%e\t (%1.5f%%)\n", i, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc)
4718  } ;
4719 #endif
4720  }
4721  return result;
4722 }
4723 
4724 #ifdef USE_HMMDEBUG
bool CHMM::check_path_derivatives()
{
	// Debug-only sanity check (compiled under USE_HMMDEBUG): compares the
	// analytic path derivatives (path_derivative_*) against central
	// finite differences of best_path() for every parameter of every
	// observation sequence, logging the relative error via SG_DEBUG.
	// Note: `result` is never set to true, so this always returns false;
	// the useful output is the debug log.
	bool result=false;
	const float64_t delta=1e-4 ;

	for (int32_t dim=0; dim<p_observations->get_num_vectors(); dim++)
	{
		int32_t i ;
		//derivates log(dp),log(dq)
		// --- transition parameters a(i,j) ---
		for (i=0; i<N; i++)
		{
			for (int32_t j=0; j<N; j++)
			{
				float64_t old_a=get_a(i,j) ;

				// perturb in probability space (parameters are stored as logs),
				// re-evaluate the best path on each side of the perturbation
				set_a(i,j, log(exp(old_a)-delta)) ;
				invalidate_model() ;
				float64_t prob_old=best_path(dim) ;

				set_a(i,j, log(exp(old_a)+delta)) ;
				invalidate_model() ;
				float64_t prob_new=best_path(dim);

				// central finite difference
				float64_t deriv = (prob_new-prob_old)/(2*delta) ;

				// restore the parameter before computing the analytic value
				set_a(i,j, old_a) ;
				invalidate_model() ;
				float64_t deriv_calc=path_derivative_a(i, j, dim) ;

				SG_DEBUG("da(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc)
			} ;
		} ;
		// --- emission parameters b(i,j) ---
		for (i=0; i<N; i++)
		{
			for (int32_t j=0; j<M; j++)
			{
				float64_t old_b=get_b(i,j) ;

				set_b(i,j, log(exp(old_b)-delta)) ;
				invalidate_model() ;
				float64_t prob_old=best_path(dim) ;

				set_b(i,j, log(exp(old_b)+delta)) ;
				invalidate_model() ;
				float64_t prob_new=best_path(dim);

				float64_t deriv = (prob_new-prob_old)/(2*delta) ;

				set_b(i,j, old_b) ;
				invalidate_model() ;
				float64_t deriv_calc=path_derivative_b(i, j, dim);

				SG_DEBUG("db(%i,%i) = %e:%e\t (%1.5f%%)\n", i,j, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/(deriv_calc))
			} ;
		} ;

		// --- start distribution p(i) ---
		for (i=0; i<N; i++)
		{
			float64_t old_p=get_p(i) ;

			set_p(i, log(exp(old_p)-delta)) ;
			invalidate_model() ;
			float64_t prob_old=best_path(dim) ;

			set_p(i, log(exp(old_p)+delta)) ;
			invalidate_model() ;
			float64_t prob_new=best_path(dim);
			float64_t deriv = (prob_new-prob_old)/(2*delta) ;

			set_p(i, old_p) ;
			invalidate_model() ;
			float64_t deriv_calc=path_derivative_p(i, dim);

			//if (fabs(deriv_calc_old-deriv)>1e-4)
			SG_DEBUG("dp(%i) = %e:%e\t (%1.5f%%)\n", i, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc)
		} ;
		// --- end distribution q(i) ---
		for (i=0; i<N; i++)
		{
			float64_t old_q=get_q(i) ;

			set_q(i, log(exp(old_q)-delta)) ;
			invalidate_model() ;
			float64_t prob_old=best_path(dim) ;

			set_q(i, log(exp(old_q)+delta)) ;
			invalidate_model() ;
			float64_t prob_new=best_path(dim);

			float64_t deriv = (prob_new-prob_old)/(2*delta) ;

			set_q(i, old_q) ;
			invalidate_model() ;
			float64_t deriv_calc=path_derivative_q(i, dim);

			//if (fabs(deriv_calc_old-deriv)>1e-4)
			SG_DEBUG("dq(%i) = %e:%e\t (%1.5f%%)\n", i, deriv_calc, deriv, 100.0*(deriv-deriv_calc)/deriv_calc)
		} ;
	}
	return result;
}
4825 #endif // USE_HMMDEBUG
4826 
4827 //normalize model (sum to one constraint)
4828 void CHMM::normalize(bool keep_dead_states)
4829 {
4830  int32_t i,j;
4831  const float64_t INF=-1e10;
4832  float64_t sum_p =INF;
4833 
4834  for (i=0; i<N; i++)
4835  {
4836  sum_p=CMath::logarithmic_sum(sum_p, get_p(i));
4837 
4838  float64_t sum_b =INF;
4839  float64_t sum_a =get_q(i);
4840 
4841  for (j=0; j<N; j++)
4842  sum_a=CMath::logarithmic_sum(sum_a, get_a(i,j));
4843 
4844  if (sum_a>CMath::ALMOST_NEG_INFTY/N || (!keep_dead_states) )
4845  {
4846  for (j=0; j<N; j++)
4847  set_a(i,j, get_a(i,j)-sum_a);
4848  set_q(i, get_q(i)-sum_a);
4849  }
4850 
4851  for (j=0; j<M; j++)
4852  sum_b=CMath::logarithmic_sum(sum_b, get_b(i,j));
4853  for (j=0; j<M; j++)
4854  set_b(i,j, get_b(i,j)-sum_b);
4855  }
4856 
4857  for (i=0; i<N; i++)
4858  set_p(i, get_p(i)-sum_p);
4859 
4860  invalidate_model();
4861 }
4862 
4863 bool CHMM::append_model(CHMM* app_model)
4864 {
4865  bool result=false;
4866  const int32_t num_states=app_model->get_N();
4867  int32_t i,j;
4868 
4869  SG_DEBUG("cur N:%d M:%d\n", N, M)
4870  SG_DEBUG("old N:%d M:%d\n", app_model->get_N(), app_model->get_M())
4871  if (app_model->get_M() == get_M())
4872  {
4873  float64_t* n_p=SG_MALLOC(float64_t, N+num_states);
4874  float64_t* n_q=SG_MALLOC(float64_t, N+num_states);
4875  float64_t* n_a=SG_MALLOC(float64_t, (N+num_states)*(N+num_states));
4876  //SG_PRINT("size n_b: %d\n", (N+num_states)*M)
4877  float64_t* n_b=SG_MALLOC(float64_t, (N+num_states)*M);
4878 
4879  //clear n_x
4880  for (i=0; i<N+num_states; i++)
4881  {
4882  n_p[i]=-CMath::INFTY;
4883  n_q[i]=-CMath::INFTY;
4884 
4885  for (j=0; j<N+num_states; j++)
4886  n_a[(N+num_states)*i+j]=-CMath::INFTY;
4887 
4888  for (j=0; j<M; j++)
4889  n_b[M*i+j]=-CMath::INFTY;
4890  }
4891 
4892  //copy models first
4893  // warning pay attention to the ordering of
4894  // transition_matrix_a, observation_matrix_b !!!
4895 
4896  // cur_model
4897  for (i=0; i<N; i++)
4898  {
4899  n_p[i]=get_p(i);
4900 
4901  for (j=0; j<N; j++)
4902  n_a[(N+num_states)*j+i]=get_a(i,j);
4903 
4904  for (j=0; j<M; j++)
4905  {
4906  n_b[M*i+j]=get_b(i,j);
4907  }
4908  }
4909 
4910  // append_model
4911  for (i=0; i<app_model->get_N(); i++)
4912  {
4913  n_q[i+N]=app_model->get_q(i);
4914 
4915  for (j=0; j<app_model->get_N(); j++)
4916  n_a[(N+num_states)*(j+N)+(i+N)]=app_model->get_a(i,j);
4917  for (j=0; j<app_model->get_M(); j++)
4918  n_b[M*(i+N)+j]=app_model->get_b(i,j);
4919  }
4920 
4921 
4922  // transition to the two and back
4923  for (i=0; i<N; i++)
4924  {
4925  for (j=N; j<N+num_states; j++)
4926  n_a[(N+num_states)*j + i]=CMath::logarithmic_sum(get_q(i)+app_model->get_p(j-N), n_a[(N+num_states)*j + i]);
4927  }
4928 
4930  N+=num_states;
4931 
4933 
4934  //delete + adjust pointers
4936  SG_FREE(end_state_distribution_q);
4937  SG_FREE(transition_matrix_a);
4938  SG_FREE(observation_matrix_b);
4939 
4940  transition_matrix_a=n_a;
4944 
4945  SG_WARNING("not normalizing anymore, call normalize_hmm to make sure the hmm is valid!!\n")
4947  invalidate_model();
4948  }
4949  else
4950  SG_ERROR("number of observations is different for append model, doing nothing!\n")
4951 
4952  return result;
4953 }
4954 
4955 bool CHMM::append_model(CHMM* app_model, float64_t* cur_out, float64_t* app_out)
4956 {
4957  bool result=false;
4958  const int32_t num_states=app_model->get_N()+2;
4959  int32_t i,j;
4960 
4961  if (app_model->get_M() == get_M())
4962  {
4963  float64_t* n_p=SG_MALLOC(float64_t, N+num_states);
4964  float64_t* n_q=SG_MALLOC(float64_t, N+num_states);
4965  float64_t* n_a=SG_MALLOC(float64_t, (N+num_states)*(N+num_states));
4966  //SG_PRINT("size n_b: %d\n", (N+num_states)*M)
4967  float64_t* n_b=SG_MALLOC(float64_t, (N+num_states)*M);
4968 
4969  //clear n_x
4970  for (i=0; i<N+num_states; i++)
4971  {
4972  n_p[i]=-CMath::INFTY;
4973  n_q[i]=-CMath::INFTY;
4974 
4975  for (j=0; j<N+num_states; j++)
4976  n_a[(N+num_states)*j+i]=-CMath::INFTY;
4977 
4978  for (j=0; j<M; j++)
4979  n_b[M*i+j]=-CMath::INFTY;
4980  }
4981 
4982  //copy models first
4983  // warning pay attention to the ordering of
4984  // transition_matrix_a, observation_matrix_b !!!
4985 
4986  // cur_model
4987  for (i=0; i<N; i++)
4988  {
4989  n_p[i]=get_p(i);
4990 
4991  for (j=0; j<N; j++)
4992  n_a[(N+num_states)*j+i]=get_a(i,j);
4993 
4994  for (j=0; j<M; j++)
4995  {
4996  n_b[M*i+j]=get_b(i,j);
4997  }
4998  }
4999 
5000  // append_model
5001  for (i=0; i<app_model->get_N(); i++)
5002  {
5003  n_q[i+N+2]=app_model->get_q(i);
5004 
5005  for (j=0; j<app_model->get_N(); j++)
5006  n_a[(N+num_states)*(j+N+2)+(i+N+2)]=app_model->get_a(i,j);
5007  for (j=0; j<app_model->get_M(); j++)
5008  n_b[M*(i+N+2)+j]=app_model->get_b(i,j);
5009  }
5010 
5011  //initialize the two special states
5012 
5013  // output
5014  for (i=0; i<M; i++)
5015  {
5016  n_b[M*N+i]=cur_out[i];
5017  n_b[M*(N+1)+i]=app_out[i];
5018  }
5019 
5020  // transition to the two and back
5021  for (i=0; i<N+num_states; i++)
5022  {
5023  // the first state is only connected to the second
5024  if (i==N+1)
5025  n_a[(N+num_states)*i + N]=0;
5026 
5027  // only states of the cur_model can reach the
5028  // first state
5029  if ((uint32_t)i<(uint32_t)N)
5030  n_a[(N+num_states)*N+i]=get_q(i);
5031 
5032  // the second state is only connected to states of
5033  // the append_model (with probab app->p(i))
5034  if (i>=N+2)
5035  n_a[(N+num_states)*i+(N+1)]=app_model->get_p(i-(N+2));
5036  }
5037 
5039  N+=num_states;
5040 
5042 
5043  //delete + adjust pointers
5045  SG_FREE(end_state_distribution_q);
5046  SG_FREE(transition_matrix_a);
5047  SG_FREE(observation_matrix_b);
5048 
5049  transition_matrix_a=n_a;
5053 
5054  SG_WARNING("not normalizing anymore, call normalize_hmm to make sure the hmm is valid!!\n")
5056  invalidate_model();
5057  }
5058 
5059  return result;
5060 }
5061 
5062 
5063 void CHMM::add_states(int32_t num_states, float64_t default_value)
5064 {
5065  int32_t i,j;
5066  const float64_t MIN_RAND=1e-2; //this is the range of the random values for the new variables
5067  const float64_t MAX_RAND=2e-1;
5068 
5069  float64_t* n_p=SG_MALLOC(float64_t, N+num_states);
5070  float64_t* n_q=SG_MALLOC(float64_t, N+num_states);
5071  float64_t* n_a=SG_MALLOC(float64_t, (N+num_states)*(N+num_states));
5072  //SG_PRINT("size n_b: %d\n", (N+num_states)*M)
5073  float64_t* n_b=SG_MALLOC(float64_t, (N+num_states)*M);
5074 
5075  // warning pay attention to the ordering of
5076  // transition_matrix_a, observation_matrix_b !!!
5077  for (i=0; i<N; i++)
5078  {
5079  n_p[i]=get_p(i);
5080  n_q[i]=get_q(i);
5081 
5082  for (j=0; j<N; j++)
5083  n_a[(N+num_states)*j+i]=get_a(i,j);
5084 
5085  for (j=0; j<M; j++)
5086  n_b[M*i+j]=get_b(i,j);
5087  }
5088 
5089  for (i=N; i<N+num_states; i++)
5090  {
5091  n_p[i]=VAL_MACRO;
5092  n_q[i]=VAL_MACRO;
5093 
5094  for (j=0; j<N; j++)
5095  n_a[(N+num_states)*i+j]=VAL_MACRO;
5096 
5097  for (j=0; j<N+num_states; j++)
5098  n_a[(N+num_states)*j+i]=VAL_MACRO;
5099 
5100  for (j=0; j<M; j++)
5101  n_b[M*i+j]=VAL_MACRO;
5102  }
5104  N+=num_states;
5105 
5107 
5108  //delete + adjust pointers
5110  SG_FREE(end_state_distribution_q);
5111  SG_FREE(transition_matrix_a);
5112  SG_FREE(observation_matrix_b);
5113 
5114  transition_matrix_a=n_a;
5118 
5119  invalidate_model();
5120  normalize();
5121 }
5122 
5124 {
5125  for (int32_t i=0; i<N; i++)
5126  {
5127  int32_t j;
5128 
5129  if (exp(get_p(i)) < value)
5131 
5132  if (exp(get_q(i)) < value)
5134 
5135  for (j=0; j<N; j++)
5136  {
5137  if (exp(get_a(i,j)) < value)
5139  }
5140 
5141  for (j=0; j<M; j++)
5142  {
5143  if (exp(get_b(i,j)) < value)
5145  }
5146  }
5147  normalize();
5148  invalidate_model();
5149 }
5150 
5151 bool CHMM::linear_train(bool right_align)
5152 {
5153  if (p_observations)
5154  {
5155  int32_t histsize=(get_M()*get_N());
5156  int32_t* hist=SG_MALLOC(int32_t, histsize);
5157  int32_t* startendhist=SG_MALLOC(int32_t, get_N());
5158  int32_t i,dim;
5159 
5161 
5162  for (i=0; i<histsize; i++)
5163  hist[i]=0;
5164 
5165  for (i=0; i<get_N(); i++)
5166  startendhist[i]=0;
5167 
5168  if (right_align)
5169  {
5170  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
5171  {
5172  int32_t len=0;
5173  bool free_vec;
5174  uint16_t* obs=p_observations->get_feature_vector(dim, len, free_vec);
5175 
5176  ASSERT(len<=get_N())
5177  startendhist[(get_N()-len)]++;
5178 
5179  for (i=0;i<len;i++)
5180  hist[(get_N()-len+i)*get_M() + *obs++]++;
5181 
5182  p_observations->free_feature_vector(obs, dim, free_vec);
5183  }
5184 
5185  set_q(get_N()-1, 1);
5186  for (i=0; i<get_N()-1; i++)
5187  set_q(i, 0);
5188 
5189  for (i=0; i<get_N(); i++)
5190  set_p(i, startendhist[i]+PSEUDO);
5191 
5192  for (i=0;i<get_N();i++)
5193  {
5194  for (int32_t j=0; j<get_N(); j++)
5195  {
5196  if (i==j-1)
5197  set_a(i,j, 1);
5198  else
5199  set_a(i,j, 0);
5200  }
5201  }
5202  }
5203  else
5204  {
5205  for (dim=0; dim<p_observations->get_num_vectors(); dim++)
5206  {
5207  int32_t len=0;
5208  bool free_vec;
5209  uint16_t* obs=p_observations->get_feature_vector(dim, len, free_vec);
5210 
5211  ASSERT(len<=get_N())
5212  for (i=0;i<len;i++)
5213  hist[i*get_M() + *obs++]++;
5214 
5215  startendhist[len-1]++;
5216 
5217  p_observations->free_feature_vector(obs, dim, free_vec);
5218  }
5219 
5220  set_p(0, 1);
5221  for (i=1; i<get_N(); i++)
5222  set_p(i, 0);
5223 
5224  for (i=0; i<get_N(); i++)
5225  set_q(i, startendhist[i]+PSEUDO);
5226 
5227  int32_t total=p_observations->get_num_vectors();
5228 
5229  for (i=0;i<get_N();i++)
5230  {
5231  total-= startendhist[i] ;
5232 
5233  for (int32_t j=0; j<get_N(); j++)
5234  {
5235  if (i==j-1)
5236  set_a(i,j, total+PSEUDO);
5237  else
5238  set_a(i,j, 0);
5239  }
5240  }
5241  ASSERT(total==0)
5242  }
5243 
5244  for (i=0;i<get_N();i++)
5245  {
5246  for (int32_t j=0; j<get_M(); j++)
5247  {
5248  float64_t sum=0;
5249  int32_t offs=i*get_M()+ p_observations->get_masked_symbols((uint16_t) j, (uint8_t) 254);
5250 
5251  for (int32_t k=0; k<p_observations->get_original_num_symbols(); k++)
5252  sum+=hist[offs+k];
5253 
5254  set_b(i,j, (PSEUDO+hist[i*get_M()+j])/(sum+PSEUDO*p_observations->get_original_num_symbols()));
5255  }
5256  }
5257 
5258  SG_FREE(hist);
5259  SG_FREE(startendhist);
5260  convert_to_log();
5261  invalidate_model();
5262  return true;
5263  }
5264  else
5265  return false;
5266 }
5267 
5269 {
5270  ASSERT(obs)
5271  p_observations=obs;
5272  SG_REF(obs);
5273 
5274  if (obs)
5275  if (obs->get_num_symbols() > M)
5276  SG_ERROR("number of symbols in observation (%ld) larger than M (%d)\n", (long) obs->get_num_symbols(), M)
5277 
5278  if (!reused_caches)
5279  {
5280 #ifdef USE_HMMPARALLEL_STRUCTURES
5281  for (int32_t i=0; i<parallel->get_num_threads(); i++)
5282  {
5283  SG_FREE(alpha_cache[i].table);
5284  SG_FREE(beta_cache[i].table);
5285  SG_FREE(states_per_observation_psi[i]);
5286  SG_FREE(path[i]);
5287 
5288  alpha_cache[i].table=NULL;
5289  beta_cache[i].table=NULL;
5291  path[i]=NULL;
5292  } ;
5293 #else
5294  SG_FREE(alpha_cache.table);
5295  SG_FREE(beta_cache.table);
5296  SG_FREE(states_per_observation_psi);
5297  SG_FREE(path);
5298 
5299  alpha_cache.table=NULL;
5300  beta_cache.table=NULL;
5302  path=NULL;
5303 
5304 #endif //USE_HMMPARALLEL_STRUCTURES
5305  }
5306 
5307  invalidate_model();
5308 }
5309 
5311 {
5312  ASSERT(obs)
5313  SG_REF(obs);
5314  p_observations=obs;
5315 
5316  /* from Distribution, necessary for calls to base class methods, like
5317  * get_log_likelihood_sample():
5318  */
5319  SG_REF(obs);
5320  features=obs;
5321 
5322  SG_DEBUG("num symbols alphabet: %ld\n", obs->get_alphabet()->get_num_symbols())
5323  SG_DEBUG("num symbols: %ld\n", obs->get_num_symbols())
5324  SG_DEBUG("M: %d\n", M)
5325 
5326  if (obs)
5327  {
5328  if (obs->get_num_symbols() > M)
5329  {
5330  SG_ERROR("number of symbols in observation (%ld) larger than M (%d)\n", (long) obs->get_num_symbols(), M)
5331  }
5332  }
5333 
5334  if (!reused_caches)
5335  {
5336 #ifdef USE_HMMPARALLEL_STRUCTURES
5337  for (int32_t i=0; i<parallel->get_num_threads(); i++)
5338  {
5339  SG_FREE(alpha_cache[i].table);
5340  SG_FREE(beta_cache[i].table);
5341  SG_FREE(states_per_observation_psi[i]);
5342  SG_FREE(path[i]);
5343 
5344  alpha_cache[i].table=NULL;
5345  beta_cache[i].table=NULL;
5347  path[i]=NULL;
5348  } ;
5349 #else
5350  SG_FREE(alpha_cache.table);
5351  SG_FREE(beta_cache.table);
5352  SG_FREE(states_per_observation_psi);
5353  SG_FREE(path);
5354 
5355  alpha_cache.table=NULL;
5356  beta_cache.table=NULL;
5358  path=NULL;
5359 
5360 #endif //USE_HMMPARALLEL_STRUCTURES
5361  }
5362 
5363  if (obs!=NULL)
5364  {
5365  int32_t max_T=obs->get_max_vector_length();
5366 
5367  if (lambda)
5368  {
5369 #ifdef USE_HMMPARALLEL_STRUCTURES
5370  for (int32_t i=0; i<parallel->get_num_threads(); i++)
5371  {
5372  this->alpha_cache[i].table= lambda->alpha_cache[i].table;
5373  this->beta_cache[i].table= lambda->beta_cache[i].table;
5375  this->path[i]=lambda->path[i];
5376  } ;
5377 #else
5378  this->alpha_cache.table= lambda->alpha_cache.table;
5379  this->beta_cache.table= lambda->beta_cache.table;
5381  this->path=lambda->path;
5382 #endif //USE_HMMPARALLEL_STRUCTURES
5383 
5384  this->reused_caches=true;
5385  }
5386  else
5387  {
5388  this->reused_caches=false;
5389 #ifdef USE_HMMPARALLEL_STRUCTURES
5390  SG_INFO("allocating mem for path-table of size %.2f Megabytes (%d*%d) each:\n", ((float32_t)max_T)*N*sizeof(T_STATES)/(1024*1024), max_T, N)
5391  for (int32_t i=0; i<parallel->get_num_threads(); i++)
5392  {
5393  if ((states_per_observation_psi[i]=SG_MALLOC(T_STATES,max_T*N))!=NULL)
5394  SG_DEBUG("path_table[%i] successfully allocated\n",i)
5395  else
5396  SG_ERROR("failed allocating memory for path_table[%i].\n",i)
5397  path[i]=SG_MALLOC(T_STATES, max_T);
5398  }
5399 #else // no USE_HMMPARALLEL_STRUCTURES
5400  SG_INFO("allocating mem of size %.2f Megabytes (%d*%d) for path-table ....", ((float32_t)max_T)*N*sizeof(T_STATES)/(1024*1024), max_T, N)
5401  if ((states_per_observation_psi=SG_MALLOC(T_STATES,max_T*N)) != NULL)
5402  SG_DONE()
5403  else
5404  SG_ERROR("failed.\n")
5405 
5406  path=SG_MALLOC(T_STATES, max_T);
5407 #endif // USE_HMMPARALLEL_STRUCTURES
5408 #ifdef USE_HMMCACHE
5409  SG_INFO("allocating mem for caches each of size %.2f Megabytes (%d*%d) ....\n", ((float32_t)max_T)*N*sizeof(T_ALPHA_BETA_TABLE)/(1024*1024), max_T, N)
5410 
5411 #ifdef USE_HMMPARALLEL_STRUCTURES
5412  for (int32_t i=0; i<parallel->get_num_threads(); i++)
5413  {
5414  if ((alpha_cache[i].table=SG_MALLOC(T_ALPHA_BETA_TABLE, max_T*N))!=NULL)
5415  SG_DEBUG("alpha_cache[%i].table successfully allocated\n",i)
5416  else
5417  SG_ERROR("allocation of alpha_cache[%i].table failed\n",i)
5418 
5419  if ((beta_cache[i].table=SG_MALLOC(T_ALPHA_BETA_TABLE, max_T*N)) != NULL)
5420  SG_DEBUG("beta_cache[%i].table successfully allocated\n",i)
5421  else
5422  SG_ERROR("allocation of beta_cache[%i].table failed\n",i)
5423  } ;
5424 #else // USE_HMMPARALLEL_STRUCTURES
5425  if ((alpha_cache.table=SG_MALLOC(T_ALPHA_BETA_TABLE, max_T*N)) != NULL)
5426  SG_DEBUG("alpha_cache.table successfully allocated\n")
5427  else
5428  SG_ERROR("allocation of alpha_cache.table failed\n")
5429 
5430  if ((beta_cache.table=SG_MALLOC(T_ALPHA_BETA_TABLE, max_T*N)) != NULL)
5431  SG_DEBUG("beta_cache.table successfully allocated\n")
5432  else
5433  SG_ERROR("allocation of beta_cache.table failed\n")
5434 
5435 #endif // USE_HMMPARALLEL_STRUCTURES
5436 #else // USE_HMMCACHE
5437 #ifdef USE_HMMPARALLEL_STRUCTURES
5438  for (int32_t i=0; i<parallel->get_num_threads(); i++)
5439  {
5440  alpha_cache[i].table=NULL ;
5441  beta_cache[i].table=NULL ;
5442  } ;
5443 #else //USE_HMMPARALLEL_STRUCTURES
5444  alpha_cache.table=NULL ;
5445  beta_cache.table=NULL ;
5446 #endif //USE_HMMPARALLEL_STRUCTURES
5447 #endif //USE_HMMCACHE
5448  }
5449  }
5450 
5451  //initialize pat/mod_prob as not calculated
5452  invalidate_model();
5453 }
5454 
5455 bool CHMM::permutation_entropy(int32_t window_width, int32_t sequence_number)
5456 {
5457  if (p_observations && window_width>0 &&
5458  ( sequence_number<0 || sequence_number < p_observations->get_num_vectors()))
5459  {
5460  int32_t min_sequence=sequence_number;
5461  int32_t max_sequence=sequence_number;
5462 
5463  if (sequence_number<0)
5464  {
5465  min_sequence=0;
5466  max_sequence=p_observations->get_num_vectors();
5467  SG_INFO("numseq: %d\n", max_sequence)
5468  }
5469 
5470  SG_INFO("min_sequence: %d max_sequence: %d\n", min_sequence, max_sequence)
5471  for (sequence_number=min_sequence; sequence_number<max_sequence; sequence_number++)
5472  {
5473  int32_t sequence_length=0;
5474  bool free_vec;
5475  uint16_t* obs=p_observations->get_feature_vector(sequence_number, sequence_length, free_vec);
5476 
5477  int32_t histsize=get_M();
5478  int64_t* hist=SG_MALLOC(int64_t, histsize);
5479  int32_t i,j;
5480 
5481  for (i=0; i<sequence_length-window_width; i++)
5482  {
5483  for (j=0; j<histsize; j++)
5484  hist[j]=0;
5485 
5486  uint16_t* ptr=&obs[i];
5487  for (j=0; j<window_width; j++)
5488  {
5489  hist[*ptr++]++;
5490  }
5491 
5492  float64_t perm_entropy=0;
5493  for (j=0; j<get_M(); j++)
5494  {
5495  float64_t p=
5496  (((float64_t) hist[j])+PSEUDO)/
5497  (window_width+get_M()*PSEUDO);
5498  perm_entropy+=p*log(p);
5499  }
5500 
5501  SG_PRINT("%f\n", perm_entropy)
5502  }
5503  p_observations->free_feature_vector(obs, sequence_number, free_vec);
5504 
5505  SG_FREE(hist);
5506  }
5507  return true;
5508  }
5509  else
5510  return false;
5511 }
5512 
5513 float64_t CHMM::get_log_derivative(int32_t num_param, int32_t num_example)
5514 {
5515  if (num_param<N)
5516  return model_derivative_p(num_param, num_example);
5517  else if (num_param<2*N)
5518  return model_derivative_q(num_param-N, num_example);
5519  else if (num_param<N*(N+2))
5520  {
5521  int32_t k=num_param-2*N;
5522  int32_t i=k/N;
5523  int32_t j=k%N;
5524  return model_derivative_a(i,j, num_example);
5525  }
5526  else if (num_param<N*(N+2+M))
5527  {
5528  int32_t k=num_param-N*(N+2);
5529  int32_t i=k/M;
5530  int32_t j=k%M;
5531  return model_derivative_b(i,j, num_example);
5532  }
5533 
5534  ASSERT(false)
5535  return -1;
5536 }
5537 
5539 {
5540  if (num_param<N)
5541  return get_p(num_param);
5542  else if (num_param<2*N)
5543  return get_q(num_param-N);
5544  else if (num_param<N*(N+2))
5545  return transition_matrix_a[num_param-2*N];
5546  else if (num_param<N*(N+2+M))
5547  return observation_matrix_b[num_param-N*(N+2)];
5548 
5549  ASSERT(false)
5550  return -1;
5551 }
5552 
5553 
5554 //convergence criteria -tobeadjusted-
5555 bool CHMM::converged(float64_t x, float64_t y)
5556 {
5557  float64_t diff=y-x;
5558  float64_t absdiff=fabs(diff);
5559 
5560  SG_INFO("\n #%03d\tbest result so far: %G (eps: %f)", iteration_count, y, diff)
5561 
5562  if (iteration_count--==0 || (absdiff<epsilon && conv_it<=0))
5563  {
5565  SG_INFO("...finished\n")
5566  conv_it=5;
5567  return true;
5568  }
5569  else
5570  {
5571  if (absdiff<epsilon)
5572  conv_it--;
5573  else
5574  conv_it=5;
5575 
5576  return false;
5577  }
5578 }
5579 
5581 {
5582  CHMM* estimate=new CHMM(this);
5583  CHMM* working=this;
5584  float64_t prob_max=-CMath::INFTY;
5585  float64_t prob=-CMath::INFTY;
5588 
5589  while (!converged(prob, prob_train) && (!CSignal::cancel_computations()))
5590  {
5591  CMath::swap(working, estimate);
5592  prob=prob_train;
5593 
5594  switch (type) {
5595  case BW_NORMAL:
5596  working->estimate_model_baum_welch(estimate); break;
5597  case BW_TRANS:
5598  working->estimate_model_baum_welch_trans(estimate); break;
5599  case BW_DEFINED:
5600  working->estimate_model_baum_welch_defined(estimate); break;
5601  case VIT_NORMAL:
5602  working->estimate_model_viterbi(estimate); break;
5603  case VIT_DEFINED:
5604  working->estimate_model_viterbi_defined(estimate); break;
5605  }
5606  prob_train=estimate->model_probability();
5607 
5608  if (prob_max<prob_train)
5609  prob_max=prob_train;
5610  }
5611 
5612  if (estimate == this)
5613  {
5614  estimate->copy_model(working);
5615  delete working;
5616  }
5617  else
5618  delete estimate;
5619 
5620  return true;
5621 }
int32_t get_learn_p(int32_t offset) const
get entry out of learn_p vector
Definition: HMM.h:123
int32_t * learn_p
start states to be learned
Definition: HMM.h:312
virtual int32_t get_max_vector_length()
SGVector< ST > get_feature_vector(int32_t num)
void set_observation_nocache(CStringFeatures< uint16_t > *obs)
Definition: HMM.cpp:5268
static const uint8_t B_T
Definition: Alphabet.h:332
float64_t * transition_matrix_a
transition matrix
Definition: HMM.h:1218
#define SG_INFO(...)
Definition: SGIO.h:118
E_STATE
Definition: HMM.cpp:51
bool mod_prob_updated
true if model probability is up to date
Definition: HMM.h:1247
void chop(float64_t value)
set any model parameter with probability smaller than value to ZERO
Definition: HMM.cpp:5123
#define SG_DONE()
Definition: SGIO.h:157
float64_t backward_comp(int32_t time, int32_t state, int32_t dimension)
Definition: HMM.cpp:923
#define ARRAY_SIZE
Definition: HMM.cpp:26
int32_t N
number of states
Definition: HMM.h:1197
void convert_to_log()
convert model to log probabilities
Definition: HMM.cpp:2395
float64_t backward(int32_t time, int32_t state, int32_t dimension)
inline proxies for backward pass
Definition: HMM.h:1560
float64_t get_const_p_val(int32_t offset) const
get value out of const_p_val vector
Definition: HMM.h:171
static const int32_t GOTp
Definition: HMM.h:1341
bool save_likelihood(FILE *file)
Definition: HMM.cpp:4128
void close_bracket(FILE *file)
expect closing bracket
Definition: HMM.cpp:2825
T_STATES * P_STATES
Definition: HMM.h:64
int32_t conv_it
Definition: HMM.h:1235
static int is_finite(double f)
checks whether a float is finite
Definition: Math.cpp:266
float64_t * const_a_val
values for transitions that have constant probability
Definition: HMM.h:338
Model()
Constructor - initializes all variables/structures.
Definition: HMM.cpp:81
void set_const_p(int32_t offset, int32_t value)
set value in const_p vector
Definition: HMM.h:235
bool get_numbuffer(FILE *file, char *buffer, int32_t length)
put a sequence of numbers into the buffer
Definition: HMM.cpp:2865
int32_t get_M() const
access function for number of observations M
Definition: HMM.h:984
float64_t pat_prob
probability of best path
Definition: HMM.h:1241
float64_t get_const_b_val(int32_t line) const
get value out of const_b_val vector
Definition: HMM.h:165
static const uint8_t B_G
Definition: Alphabet.h:330
int32_t get_num_threads() const
Definition: Parallel.cpp:78
bool save_model(FILE *file)
Definition: HMM.cpp:3978
void set_observations(CStringFeatures< uint16_t > *obs, CHMM *hmm=NULL)
Definition: HMM.cpp:5310
void set_const_p_val(int32_t offset, float64_t value)
set value in const_p_val vector
Definition: HMM.h:259
Definition: HMM.cpp:69
static const float64_t INFTY
infinity
Definition: Math.h:2048
Definition: HMM.cpp:59
T_STATES * states_per_observation_psi
backtracking table for viterbi can be terrible HUGE O(T*N)
Definition: HMM.h:1316
float64_t forward(int32_t time, int32_t state, int32_t dimension)
inline proxies for forward pass
Definition: HMM.h:1543
float64_t * const_q_val
values for end states that have constant probability
Definition: HMM.h:347
Definition: HMM.cpp:58
bool all_path_prob_updated
true if path probability is up to date
Definition: HMM.h:1250
float64_t * arrayN2
Definition: HMM.h:1277
#define FLOATWRITE(file, value)
Definition: HMM.cpp:4147
float64_t epsilon
convergence criterion epsilon
Definition: HMM.h:1234
void estimate_model_baum_welch_defined(CHMM *train)
Definition: HMM.cpp:1771
static const int32_t GOTconst_p
Definition: HMM.h:1358
viterbi only for defined transitions/observations
Definition: HMM.h:80
virtual int32_t get_num_vectors() const
standard viterbi
Definition: HMM.h:78
int32_t get_const_p(int32_t offset) const
get entry out of const_p vector
Definition: HMM.h:147
void invalidate_model()
Definition: HMM.cpp:2717
static const uint8_t B_C
Definition: Alphabet.h:328
baum welch only for defined transitions/observations
Definition: HMM.h:76
bool reused_caches
Definition: HMM.h:1265
bool linear_train(bool right_align=false)
estimates linear model from observations.
Definition: HMM.cpp:5151
bool save_model_bin(FILE *file)
Definition: HMM.cpp:4149
int32_t * learn_b
emissions to be learned
Definition: HMM.h:309
float64_t * transition_matrix_A
matrix of absolute counts of transitions
Definition: HMM.h:1212
#define SG_ERROR(...)
Definition: SGIO.h:129
static const int32_t GOTlearn_a
Definition: HMM.h:1346
float64_t path_derivative_b(T_STATES i, uint16_t j, int32_t dimension)
computes d log p(lambda,best_path)/d b_ij
Definition: HMM.h:1478
T_ALPHA_BETA beta_cache
cache for backward variables can be terrible HUGE O(T*N)
Definition: HMM.h:1313
float64_t get_b(T_STATES line_, uint16_t column) const
Definition: HMM.h:1157
Definition: HMM.cpp:60
bool path_prob_updated
true if path probability is up to date
Definition: HMM.h:1322
bool save_likelihood_bin(FILE *file)
Definition: HMM.cpp:4111
int32_t line
Definition: HMM.h:1203
bool baum_welch_viterbi_train(BaumWelchViterbiType type)
Definition: HMM.cpp:5580
static const int32_t GOTO
Definition: HMM.h:1335
float64_t forward_comp_old(int32_t time, int32_t state, int32_t dimension)
Definition: HMM.cpp:791
float64_t * observation_matrix_B
matrix of absolute counts of observations within each state
Definition: HMM.h:1215
float64_t get_A(T_STATES line_, T_STATES column) const
Definition: HMM.h:1115
float64_t T_ALPHA_BETA_TABLE
type for alpha/beta caching table
Definition: HMM.h:35
void free_feature_vector(ST *feat_vec, int32_t num, bool dofree)
floatmax_t get_original_num_symbols()
Parallel * parallel
Definition: SGObject.h:540
Base class Distribution from which all methods implementing a distribution are derived.
Definition: Distribution.h:44
bool check_model_derivatives()
numerically check whether derivates were calculated right
Definition: HMM.cpp:4620
static const int32_t GOTlearn_p
Definition: HMM.h:1350
void open_bracket(FILE *file)
expect open bracket.
Definition: HMM.cpp:2804
int32_t get_learn_q(int32_t offset) const
get entry out of learn_q vector
Definition: HMM.h:129
#define SG_REF(x)
Definition: SGObject.h:54
float64_t get_pseudo() const
returns current pseudo value
Definition: HMM.h:752
float64_t get_B(T_STATES line_, uint16_t column) const
Definition: HMM.h:1143
static uint64_t random()
Definition: Math.h:1019
float64_t * const_b_val
values for emissions that have constant probability
Definition: HMM.h:341
static const int32_t GOTlearn_q
Definition: HMM.h:1352
virtual bool train(CFeatures *data=NULL)
Definition: HMM.cpp:489
bool save_model_derivatives(FILE *file)
Definition: HMM.cpp:4502
static const float64_t ALMOST_NEG_INFTY
almost neg (log) infinity
Definition: Math.h:2052
virtual float64_t get_log_model_parameter(int32_t num_param)
Definition: HMM.cpp:5538
void estimate_model_baum_welch_old(CHMM *train)
Definition: HMM.cpp:1616
int32_t path_prob_dimension
dimension for which path_prob was calculated
Definition: HMM.h:1325
virtual ~Model()
Destructor - cleans up.
Definition: HMM.cpp:121
standard baum welch
Definition: HMM.h:72
#define VAL_MACRO
Definition: HMM.cpp:25
int32_t * learn_q
end states to be learned
Definition: HMM.h:315
bool load_model(FILE *file)
Definition: HMM.cpp:2974
Definition: HMM.cpp:53
BaumWelchViterbiType
Definition: HMM.h:69
void estimate_model_baum_welch_trans(CHMM *train)
Definition: HMM.cpp:1701
float64_t model_probability(int32_t dimension=-1)
inline proxy for model probability.
Definition: HMM.h:574
int32_t path_deriv_dimension
dimension for which path_deriv was calculated
Definition: HMM.h:1253
int32_t * const_a
transitions that have constant probability
Definition: HMM.h:325
#define SG_PRINT(...)
Definition: SGIO.h:137
static const int32_t GOTb
Definition: HMM.h:1339
ST get_masked_symbols(ST symbol, uint8_t mask)
bool status
Definition: HMM.h:1262
#define ASSERT(x)
Definition: SGIO.h:201
Definition: HMM.cpp:70
float64_t mod_prob
probability of model
Definition: HMM.h:1244
bool check_model_derivatives_combined()
Definition: HMM.cpp:4550
float64_t * arrayN1
Definition: HMM.h:1275
Definition: HMM.cpp:57
float64_t model_derivative_q(T_STATES i, int32_t dimension)
Definition: HMM.h:1418
float64_t * const_p_val
values for start states that have constant probability
Definition: HMM.h:344
T_STATES get_psi(int32_t time, T_STATES state, int32_t dimension) const
Definition: HMM.h:1173
void set_p(T_STATES offset, float64_t value)
Definition: HMM.h:1003
Definition: HMM.cpp:56
double float64_t
Definition: common.h:50
bool permutation_entropy(int32_t window_width, int32_t sequence_number)
compute permutation entropy
Definition: HMM.cpp:5455
static const int32_t GOTconst_a
Definition: HMM.h:1354
float64_t * end_state_distribution_q
distribution of end-states
Definition: HMM.h:1224
void add_states(int32_t num_states, float64_t default_val=0)
Definition: HMM.cpp:5063
float64_t PSEUDO
define pseudocounts against overfitting
Definition: HMM.h:1200
int32_t get_num_symbols() const
Definition: Alphabet.h:139
float64_t all_pat_prob
probability of best path
Definition: HMM.h:1238
float64_t path_derivative_a(T_STATES i, T_STATES j, int32_t dimension)
computes d log p(lambda,best_path)/d a_ij
Definition: HMM.h:1471
void set_const_q_val(int32_t offset, float64_t value)
set value in const_q_val vector
Definition: HMM.h:265
bool save_model_derivatives_bin(FILE *file)
Definition: HMM.cpp:4381
float64_t get_q(T_STATES offset) const
Definition: HMM.h:1088
int32_t * const_q
end states that have constant probability
Definition: HMM.h:334
CStringFeatures< uint16_t > * p_observations
observation matrix
Definition: HMM.h:1206
bool save_path_derivatives(FILE *file)
Definition: HMM.cpp:4251
virtual ST get_feature(int32_t vec_num, int32_t feat_num)
virtual EFeatureClass get_feature_class() const =0
void set_learn_a(int32_t offset, int32_t value)
set value in learn_a matrix
Definition: HMM.h:199
void set_learn_q(int32_t offset, int32_t value)
set value in learn_q vector
Definition: HMM.h:217
void set_A(T_STATES line_, T_STATES column, float64_t value)
Definition: HMM.h:1017
void set_q(T_STATES offset, float64_t value)
Definition: HMM.h:990
void set_B(T_STATES line_, uint16_t column, float64_t value)
Definition: HMM.h:1045
int32_t get_const_q(int32_t offset) const
get entry out of const_q vector
Definition: HMM.h:153
int32_t iterations
convergence criterion iterations
Definition: HMM.h:1230
float64_t get_const_q_val(int32_t offset) const
get value out of const_q_val vector
Definition: HMM.h:177
static const int32_t GOTq
Definition: HMM.h:1343
Definition: HMM.cpp:55
static const int32_t GOTlearn_b
Definition: HMM.h:1348
Model * model
Definition: HMM.h:1209
float64_t * observation_matrix_b
distribution of observations within each state
Definition: HMM.h:1227
T_STATES * get_path(int32_t dim, float64_t &prob)
Definition: HMM.cpp:4074
float64_t model_derivative_a(T_STATES i, T_STATES j, int32_t dimension)
computes log dp(lambda)/d a_ij.
Definition: HMM.h:1424
static bool cancel_computations()
Definition: Signal.h:86
bool loglikelihood
Definition: HMM.h:1259
float64_t best_path(int32_t dimension)
Definition: HMM.cpp:1154
int32_t get_const_a(int32_t line, int32_t column) const
get entry out of const_a matrix
Definition: HMM.h:135
float64_t model_probability_comp()
Definition: HMM.cpp:1282
void set_const_b_val(int32_t offset, float64_t value)
set value in const_b_val vector
Definition: HMM.h:253
bool path_deriv_updated
true if path derivative is up to date
Definition: HMM.h:1256
float float32_t
Definition: common.h:49
virtual float64_t get_log_derivative(int32_t num_param, int32_t num_example)
Definition: HMM.cpp:5513
bool append_model(CHMM *append_model, float64_t *cur_out, float64_t *app_out)
Definition: HMM.cpp:4955
void estimate_model_viterbi(CHMM *train)
Definition: HMM.cpp:1947
int32_t M
number of observation symbols eg. ACGT -> 0123
Definition: HMM.h:1194
void sort_learn_a()
sorts learn_a matrix
Definition: HMM.h:95
void set_learn_p(int32_t offset, int32_t value)
set value in learn_p vector
Definition: HMM.h:211
uint8_t T_STATES
Definition: HMM.h:62
bool load_definitions(FILE *file, bool verbose, bool initialize=true)
Definition: HMM.cpp:3273
void set_learn_b(int32_t offset, int32_t value)
set value in learn_b matrix
Definition: HMM.h:205
float64_t * initial_state_distribution_p
initial distribution of states
Definition: HMM.h:1221
bool save_path_derivatives_bin(FILE *file)
Definition: HMM.cpp:4298
baum welch only for specified transitions
Definition: HMM.h:74
#define SG_UNREF(x)
Definition: SGObject.h:55
#define SG_DEBUG(...)
Definition: SGIO.h:107
float64_t get_a(T_STATES line_, T_STATES column) const
Definition: HMM.h:1129
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
void clear_model_defined()
initializes only parameters in learn_x with log(PSEUDO)
Definition: HMM.cpp:2678
float64_t path_derivative_p(T_STATES i, int32_t dimension)
computes d log p(lambda,best_path)/d p_i
Definition: HMM.h:1457
float64_t forward_comp(int32_t time, int32_t state, int32_t dimension)
Definition: HMM.cpp:687
float64_t model_derivative_p(T_STATES i, int32_t dimension)
Definition: HMM.h:1410
float64_t get_p(T_STATES offset) const
Definition: HMM.h:1101
int32_t * const_b
emissions that have constant probability
Definition: HMM.h:328
bool comma_or_space(FILE *file)
expect comma or space.
Definition: HMM.cpp:2838
void estimate_model_viterbi_defined(CHMM *train)
Definition: HMM.cpp:2074
void sort_learn_b()
sorts learn_b matrix
Definition: HMM.h:101
The class Features is the base class of all feature objects.
Definition: Features.h:68
void init_model_defined()
Definition: HMM.cpp:2508
bool initialize_hmm(Model *model, float64_t PSEUDO, FILE *model_file=NULL)
Definition: HMM.cpp:598
static const int32_t GOTconst_b
Definition: HMM.h:1356
void free_state_dependend_arrays()
free memory that depends on N
Definition: HMM.cpp:560
void set_const_a_val(int32_t offset, float64_t value)
set value in const_a_val vector
Definition: HMM.h:247
bool alloc_state_dependend_arrays()
allocates memory that depends on N
Definition: HMM.cpp:503
void set_const_a(int32_t offset, int32_t value)
set value in const_a matrix
Definition: HMM.h:223
void set_b(T_STATES line_, uint16_t column, float64_t value)
Definition: HMM.h:1059
float64_t path_derivative_q(T_STATES i, int32_t dimension)
computes d log p(lambda,best_path)/d q_i
Definition: HMM.h:1464
static void swap(T &a, T &b)
Definition: Math.h:438
int32_t get_learn_a(int32_t line, int32_t column) const
get entry out of learn_a matrix
Definition: HMM.h:111
float64_t backward_comp_old(int32_t time, int32_t state, int32_t dimension)
Definition: HMM.cpp:1022
static const int32_t GOTM
Definition: HMM.h:1333
int32_t iteration_count
Definition: HMM.h:1231
#define SG_WARNING(...)
Definition: SGIO.h:128
virtual ~CHMM()
Destructor - Cleanup.
Definition: HMM.cpp:399
void copy_model(CHMM *l)
copies the the modelparameters from l
Definition: HMM.cpp:2701
void clear_model()
initializes model with log(PSEUDO)
Definition: HMM.cpp:2662
int32_t get_const_b(int32_t line, int32_t column) const
get entry out of const_b matrix
Definition: HMM.h:141
void output_model_defined(bool verbose=false)
performs output_model only for the defined transitions etc
Definition: HMM.cpp:2340
void output_model(bool verbose=false)
Definition: HMM.cpp:2256
static const int32_t GOTconst_q
Definition: HMM.h:1360
void set_psi(int32_t time, T_STATES state, T_STATES value, int32_t dimension)
Definition: HMM.h:1074
void set_a(T_STATES line_, T_STATES column, float64_t value)
Definition: HMM.h:1031
void normalize(bool keep_dead_states=false)
normalize the model to satisfy stochasticity
Definition: HMM.cpp:4828
static float64_t logarithmic_sum(float64_t p, float64_t q)
Definition: Math.h:1996
Definition: HMM.cpp:54
static const int32_t GOTN
Definition: HMM.h:1331
Hidden Markov Model.
Definition: HMM.h:369
T_STATES * path
best path (=state sequence) through model
Definition: HMM.h:1319
int32_t * const_p
start states that have constant probability
Definition: HMM.h:331
static const uint8_t B_A
Definition: Alphabet.h:326
void init_model_random()
init model with random values
Definition: HMM.cpp:2442
void set_const_b(int32_t offset, int32_t value)
set value in const_b matrix
Definition: HMM.h:229
T_STATES get_N() const
access function for number of states N
Definition: HMM.h:981
void set_const_q(int32_t offset, int32_t value)
set value in const_q vector
Definition: HMM.h:241
class Model
Definition: HMM.h:85
int32_t get_learn_b(int32_t line, int32_t column) const
get entry out of learn_b matrix
Definition: HMM.h:117
virtual EFeatureType get_feature_type() const =0
void estimate_model_baum_welch(CHMM *train)
Definition: HMM.cpp:1530
int32_t * learn_a
transitions to be learned
Definition: HMM.h:306
void error(int32_t p_line, const char *str)
parse error messages
Definition: HMM.h:1505
bool save_path(FILE *file)
Definition: HMM.cpp:4087
virtual int32_t get_vector_length(int32_t vec_num)
float64_t model_derivative_b(T_STATES i, uint16_t j, int32_t dimension)
computes log dp(lambda)/d b_ij.
Definition: HMM.h:1435
float64_t get_const_a_val(int32_t line) const
get value out of const_a_val vector
Definition: HMM.h:159
T_ALPHA_BETA alpha_cache
cache for forward variables can be terrible HUGE O(T*N)
Definition: HMM.h:1311
static const int32_t GOTa
Definition: HMM.h:1337

SHOGUN Machine Learning Toolbox - Documentation