24 int32_t size,
CHMM* p,
CHMM* n,
bool neglin,
bool poslin)
83 SG_DEBUG(
"pos_feat=[%i,%i,%i,%i],neg_feat=[%i,%i,%i,%i] -> %i features\n",
pos->
get_N(),
pos->
get_N(),
pos->
get_N()*
pos->
get_N(),
pos->
get_N()*
pos->
get_M(),
neg->
get_N(),
neg->
get_N(),
neg->
get_N()*
neg->
get_N(),
neg->
get_N()*
neg->
get_M(),
num_features)
87 int32_t num, int32_t &len,
float64_t* target)
103 float64_t* featurevector, int32_t num, int32_t& len)
105 int32_t i,j,p=0,x=num;
115 featurevector[p++]=(posx-negx);
205 SG_ERROR(
"allocation not successful!")
209 SG_INFO(
"calculating top feature matrix\n")
213 if (!(x % (num_vectors/10+1)))
214 SG_DEBUG(
"%02d%%.", (
int) (100.0*x/num_vectors))
215 else if (!(x % (num_vectors/200+1)))
239 for (i=0; i<hmm->
get_N(); i++)
247 for (j=0; j<hmm->
get_N(); j++)
260 if (hmm_idx->num_p > 0)
262 hmm_idx->idx_p=SG_MALLOC(int32_t, hmm_idx->num_p);
266 if (hmm_idx->num_q > 0)
268 hmm_idx->idx_q=SG_MALLOC(int32_t, hmm_idx->num_q);
272 if (hmm_idx->num_a > 0)
274 hmm_idx->idx_a_rows=SG_MALLOC(int32_t, hmm_idx->num_a);
275 hmm_idx->idx_a_cols=SG_MALLOC(int32_t, hmm_idx->num_a);
276 ASSERT(hmm_idx->idx_a_rows)
277 ASSERT(hmm_idx->idx_a_cols)
280 if (hmm_idx->num_b > 0)
282 hmm_idx->idx_b_rows=SG_MALLOC(int32_t, hmm_idx->num_b);
283 hmm_idx->idx_b_cols=SG_MALLOC(int32_t, hmm_idx->num_b);
284 ASSERT(hmm_idx->idx_b_rows)
285 ASSERT(hmm_idx->idx_b_cols)
294 for (i=0; i<hmm->
get_N(); i++)
298 ASSERT(idx_p < hmm_idx->num_p)
299 hmm_idx->idx_p[idx_p++]=i;
304 ASSERT(idx_q < hmm_idx->num_q)
305 hmm_idx->idx_q[idx_q++]=i;
308 for (j=0; j<hmm->
get_N(); j++)
312 ASSERT(idx_a < hmm_idx->num_a)
313 hmm_idx->idx_a_rows[idx_a]=i;
314 hmm_idx->idx_a_cols[idx_a++]=j;
322 ASSERT(idx_b < hmm_idx->num_b)
323 hmm_idx->idx_b_rows[idx_b]=i;
324 hmm_idx->idx_b_cols[idx_b++]=j;
361 void CTOPFeatures::init()
T_HMM_INDIZES pos_relevant_indizes
The class DenseFeatures implements dense feature matrices.
int32_t get_num_features() const
int32_t get_M() const
access function for number of observations M
virtual int32_t get_num_vectors() const
virtual float64_t * set_feature_matrix()
void set_models(CHMM *p, CHMM *n)
float64_t get_b(T_STATES line_, uint16_t column) const
T_HMM_INDIZES neg_relevant_indizes
float64_t linear_model_derivative(T_STATES i, uint16_t j, int32_t dimension)
int32_t num_features
number of features in cache
CStringFeatures< uint16_t > * get_observations()
returns a pointer to the observation string features
virtual float64_t * compute_feature_vector(int32_t num, int32_t &len, float64_t *target=NULL)
static const float64_t ALMOST_NEG_INFTY
almost negative (log-domain) infinity
float64_t model_probability(int32_t dimension=-1)
inline proxy for model probability.
void add(bool *param, const char *name, const char *description="")
virtual int32_t get_num_vectors() const
float64_t model_derivative_q(T_STATES i, int32_t dimension)
void set_num_vectors(int32_t num)
float64_t get_q(T_STATES offset) const
SGMatrix< float64_t > feature_matrix
float64_t model_derivative_a(T_STATES i, T_STATES j, int32_t dimension)
computes log dp(lambda)/d a_ij.
The class TOPFeatures implements TOP kernel features obtained from two Hidden Markov models...
float64_t linear_model_probability(int32_t dimension)
float64_t get_a(T_STATES line_, T_STATES column) const
all classes and functions are contained in the shogun namespace
float64_t model_derivative_p(T_STATES i, int32_t dimension)
float64_t get_p(T_STATES offset) const
bool compute_relevant_indizes(CHMM *hmm, T_HMM_INDIZES *hmm_idx)
int32_t num_vectors
number of vectors in cache
int32_t compute_num_features()
T_STATES get_N() const
access function for number of states N
float64_t model_derivative_b(T_STATES i, uint16_t j, int32_t dimension)
computes log dp(lambda)/d b_ij.