SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
FKFeatures.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
14 #include <shogun/io/SGIO.h>
15 #include <shogun/base/Parameter.h>
16 
17 using namespace shogun;
18 
// Default constructor. NOTE(review): the signature line is missing from this
// extract (presumably `CFKFeatures::CFKFeatures() : CDenseFeatures<float64_t>()`
// — confirm against the full source). Delegates all member setup to init().
20 {
21  init();
22 }
23 
// Constructor taking a cache size and the positive/negative HMMs.
// NOTE(review): original line 25 (the base-class initializer list, presumably
// `: CDenseFeatures<float64_t>(size)`) is missing from this extract — confirm.
24 CFKFeatures::CFKFeatures(int32_t size, CHMM* p, CHMM* n)
26 {
27  init();
28  // weight_a == -1 is the sentinel that makes set_opt_a(-1) estimate the
29  // class prior `a` instead of using a caller-supplied value.
28  weight_a=-1;
29  set_models(p,n);
30 }
31 
// Copy constructor. NOTE(review): the signature line (original line 32) is
// missing from this extract. The HMM pointers pos/neg are copied WITHOUT an
// SG_REF, yet the destructor SG_UNREFs them — this looks like a refcount
// imbalance (double-unref after copying); verify against the full source.
33 : CDenseFeatures<float64_t>(orig), pos(orig.pos), neg(orig.neg), weight_a(orig.weight_a)
34 {
35 }
36 
// Destructor (signature line missing from this extract, presumably
// `CFKFeatures::~CFKFeatures()`). Releases the references on both HMMs
// taken by set_models().
38 {
39  SG_UNREF(pos);
40  SG_UNREF(neg);
41 }
42 
// Derivative of the (log-)mixture likelihood a*P_pos + (1-a)*P_neg with
// respect to the class prior `a`: d/da log(a*pp + (1-a)*pn) = (pp-pn)/p.
// With dimension == -1 the derivative is summed over every observation
// vector; otherwise it is evaluated for the single vector `dimension`.
// NOTE(review): the signature (presumably
// `float64_t CFKFeatures::deriv_a(float64_t a, int32_t dimension)`) and the
// declarations/initializations of `pp`, `pn` and `Obs` (original lines
// 43-45, 55-56, 75-76) are missing from this extract — confirm they read the
// cached pos_prob/neg_prob or call model_probability() as the commented-out
// lines below suggest.
44 {
46  float64_t deriv=0.0 ;
47  int32_t i=dimension ;
48 
49  if (dimension==-1)
50  {
51  for (i=0; i<Obs->get_num_vectors(); i++)
52  {
53  //float64_t pp=pos->model_probability(i) ;
54  //float64_t pn=neg->model_probability(i) ;
// Subtract max(pp,pn) before exponentiating — a log-sum-exp style
// rescaling for numerical stability. The common factor exp(sub)
// cancels in the ratio (pp-pn)/p, so the derivative is unchanged.
57  float64_t sub=pp ;
58  if (pn>pp) sub=pn ;
59  pp-=sub ;
60  pn-=sub ;
61  pp=exp(pp) ;
62  pn=exp(pn) ;
63  float64_t p=a*pp+(1-a)*pn ;
64  deriv+=(pp-pn)/p ;
65 
66  /*float64_t d1=(pp-pn)/p ;
67  pp=exp(pos->model_probability(i)) ;
68  pn=exp(neg->model_probability(i)) ;
69  p=a*pp+(1-a)*pn ;
70  float64_t d2=(pp-pn)/p ;
71  fprintf(stderr, "d1=%e d2=%e, d1-d2=%e\n",d1,d2) ;*/
72  } ;
73  } else
74  {
// Single-vector case: same stabilized computation for one observation.
// NOTE(review): the assignments of pp/pn for this branch (original
// lines 75-76) are missing from this extract.
77  float64_t sub=pp ;
78  if (pn>pp) sub=pn ;
79  pp-=sub ;
80  pn-=sub ;
81  pp=exp(pp) ;
82  pn=exp(pn) ;
83  float64_t p=a*pp+(1-a)*pn ;
84  deriv+=(pp-pn)/p ;
85  } ;
86 
87  return deriv ;
88 }
89 
90 
// Set (or estimate) the class prior weight_a and return it.
// If a == -1 the optimal prior is estimated by bisection on [0,1]: deriv_a(a)
// is monotone in a, so the loop halves the bracket [la,ua] until it is
// narrower than 1e-6. Per-vector model probabilities are cached in
// pos_prob/neg_prob for the duration of the search and freed afterwards.
// NOTE(review): the signature (presumably
// `float64_t CFKFeatures::set_opt_a(float64_t a)`), the SG_MALLOC of
// pos_prob/neg_prob (original lines 96-97) and the pos_prob[i] assignment
// (line 100) are missing from this extract — confirm against the full source.
92 {
93  if (a==-1)
94  {
95  SG_INFO("estimating a.\n")
98  for (int32_t i=0; i<pos->get_observations()->get_num_vectors(); i++)
99  {
101  neg_prob[i]=neg->model_probability(i) ;
102  }
103 
// Bisection: keep the half of [la,ua] in which the derivative changes sign.
104  float64_t la=0;
105  float64_t ua=1;
106  a=(la+ua)/2;
107  while (CMath::abs(ua-la)>1e-6)
108  {
109  float64_t da=deriv_a(a);
110  if (da>0)
111  la=a;
112  if (da<=0)
113  ua=a;
114  a=(la+ua)/2;
115  SG_INFO("opt_a: a=%1.3e deriv=%1.3e la=%1.3e ua=%1.3e\n", a, da, la ,ua)
116  }
// Drop the probability caches once the estimate has converged.
117  SG_FREE(pos_prob);
118  SG_FREE(neg_prob);
119  pos_prob=NULL;
120  neg_prob=NULL;
121  }
122 
123  weight_a=a;
124  SG_INFO("setting opt_a: %g\n", a)
125  return a;
126 }
127 
// Install the positive and negative HMMs this feature object derives the
// Fisher-kernel features from. Takes a reference on both models (released in
// the destructor) and recomputes num_features from the models' state (N) and
// emission (M) counts: 1 + N*(1+N+1+M) per model plus the prior derivative.
// NOTE(review): the signature (presumably
// `void CFKFeatures::set_models(CHMM* p, CHMM* n)`) and original lines 138
// and 143 (line 143 is presumably the body of the first `if`, setting the
// vector count from pos->get_observations()) are missing from this extract.
// Also note: previously-held pos/neg are not SG_UNREFed here — possible leak
// if set_models is called twice; confirm against the full source.
129 {
130  ASSERT(p && n)
131  SG_REF(p);
132  SG_REF(n);
133 
134  pos=p;
135  neg=n;
136  set_num_vectors(0);
137 
139 
140  SG_INFO("pos_feat=[%i,%i,%i,%i],neg_feat=[%i,%i,%i,%i]\n", pos->get_N(), pos->get_N(), pos->get_N()*pos->get_N(), pos->get_N()*pos->get_M(), neg->get_N(), neg->get_N(), neg->get_N()*neg->get_N(), neg->get_N()*neg->get_M())
141 
142  if (pos && pos->get_observations())
144  if (pos && neg)
145  num_features=1+pos->get_N()*(1+pos->get_N()+1+pos->get_M()) + neg->get_N()*(1+neg->get_N()+1+neg->get_M()) ;
146 }
147 
// Allocating wrapper around the in-place compute_feature_vector(): if the
// caller passes target == NULL, a buffer of the full FK feature dimension
// (1 + N_pos*(1+N_pos+1+M_pos) + N_neg*(1+N_neg+1+M_neg) doubles) is
// SG_MALLOCed; the caller then owns it. Returns NULL if allocation fails.
// NOTE(review): the first signature line (original line 148, presumably
// `float64_t* CFKFeatures::compute_feature_vector(`) is missing from this
// extract — confirm the return type and method name against the full source.
149  int32_t num, int32_t &len, float64_t* target)
150 {
151  float64_t* featurevector=target;
152 
153  if (!featurevector)
154  featurevector=SG_MALLOC(float64_t,
155  1+
156  pos->get_N()*(1+pos->get_N()+1+pos->get_M())+
157  neg->get_N()*(1+neg->get_N()+1+neg->get_M())
158  );
159 
160  if (!featurevector)
161  return NULL;
162 
163  compute_feature_vector(featurevector, num, len);
164 
165  return featurevector;
166 }
167 
// Fill `featurevector` with the Fisher-kernel feature vector for observation
// `num`: first the derivative w.r.t. the class prior `a`, then — for each
// HMM state i of the positive model, weighted by weight_a — the derivatives
// w.r.t. start prob p, end prob q, each transition a(i,j) and each emission
// b(i,j); then the same for the negative model weighted by (1-weight_a).
// Each derivative is normalized by exp(-px). `len` is set to the total
// feature count. NOTE(review): the first signature line (original line 168)
// and the lines defining `posx`, `negx` and `px` (original lines 173-174 and
// 179 — presumably posx/negx are the models' log-likelihoods for vector x and
// px their weighted log-sum) are missing from this extract — confirm.
169  float64_t* featurevector, int32_t num, int32_t& len)
170 {
171  int32_t i,j,p=0,x=num;
172 
175 
176  len=1+pos->get_N()*(1+pos->get_N()+1+pos->get_M()) + neg->get_N()*(1+neg->get_N()+1+neg->get_M());
177 
// Feature 0: derivative of the mixture likelihood w.r.t. the prior.
178  featurevector[p++] = deriv_a(weight_a, x);
180  posx+log(weight_a),negx+log(1-weight_a));
181 
182  //first do positive model
183  for (i=0; i<pos->get_N(); i++)
184  {
185  featurevector[p++]=weight_a*exp(pos->model_derivative_p(i, x)-px);
186  featurevector[p++]=weight_a*exp(pos->model_derivative_q(i, x)-px);
187 
188  for (j=0; j<pos->get_N(); j++) {
189  featurevector[p++]=weight_a*exp(pos->model_derivative_a(i, j, x)-px);
190  }
191 
192  for (j=0; j<pos->get_M(); j++) {
193  featurevector[p++]=weight_a*exp(pos->model_derivative_b(i, j, x)-px);
194  }
195 
196  }
197 
198  //then do negative
199  for (i=0; i<neg->get_N(); i++)
200  {
201  featurevector[p++]= (1-weight_a)*exp(neg->model_derivative_p(i, x)-px);
202  featurevector[p++]= (1-weight_a)* exp(neg->model_derivative_q(i, x)-px);
203 
204  for (j=0; j<neg->get_N(); j++) {
205  featurevector[p++]= (1-weight_a)*exp(neg->model_derivative_a(i, j, x)-px);
206  }
207 
208  for (j=0; j<neg->get_M(); j++) {
209  featurevector[p++]= (1-weight_a)*exp(neg->model_derivative_b(i, j, x)-px);
210  }
211  }
212 }
213 
// Compute the dense FK feature matrix: one compute_feature_vector() call per
// observation, written column-wise into feature_matrix.matrix (stride
// num_features). Emits coarse progress via SG_DEBUG and returns the raw
// matrix pointer. NOTE(review): the signature (original line 214, presumably
// `float64_t* CFKFeatures::set_feature_matrix()`), the ASSERT message lines
// (217, 219), the num_vectors initialization (224-225) and the
// feature_matrix allocation (228-229) are missing from this extract —
// confirm against the full source.
215 {
216  ASSERT(pos)
218  ASSERT(neg)
220 
221  int32_t len=0;
222  num_features=1+ pos->get_N()*(1+pos->get_N()+1+pos->get_M()) + neg->get_N()*(1+neg->get_N()+1+neg->get_M());
223 
226 
227  SG_INFO("allocating FK feature cache of size %.2fM\n", sizeof(float64_t)*num_features*num_vectors/1024.0/1024.0)
230 
231  SG_INFO("calculating FK feature matrix\n")
232 
233  for (int32_t x=0; x<num_vectors; x++)
234  {
// Progress: a percentage every ~10% of vectors, a dot every ~0.5%.
235  if (!(x % (num_vectors/10+1)))
236  SG_DEBUG("%02d%%.", (int) (100.0*x/num_vectors))
237  else if (!(x % (num_vectors/200+1)))
238  SG_DEBUG(".")
239 
240  compute_feature_vector(&feature_matrix.matrix[x*num_features], x, len);
241  }
242 
243  SG_DONE()
244 
245  num_vectors=get_num_vectors();
246  num_features=get_num_features();
247 
248  return feature_matrix.matrix;
249 }
250 
251 void CFKFeatures::init()
252 {
253  pos = NULL;
254  neg = NULL;
255  pos_prob = NULL;
256  neg_prob = NULL;
257  weight_a = 0.0;
258 
259  unset_generic();
260  //TODO serialize HMMs
261  //m_parameters->add((CSGObject**) &pos, "pos", "HMM for positive class.");
262  //m_parameters->add((CSGObject**) &neg, "neg", "HMM for negative class.");
263  m_parameters->add(&weight_a, "weight_a", "Class prior.");
264 }

SHOGUN Machine Learning Toolbox - Documentation