// Excerpts from Shogun's CGaussian implementation (Gaussian.cpp).

// Likely from CGaussian::init(): loop over the stored diagonal factors m_d.
for (int32_t i=0; i<m_d.vlen; i++)
	// ... (loop body elided in the excerpt)
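// Note (not part of the listing): for a Gaussian whose covariance is stored through the
// diagonal factors m_d, the log-normalization constant is 0.5*(k*log(2*pi) + sum_i log d_i);
// a loop over m_d.vlen such as the one above is presumably accumulating the sum_i log d_i term.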
// CGaussian::train(): reject features that do not support dot products.
SG_ERROR("Specified features are not of type CDotFeatures\n")
// CGaussian::update_params_em(): the EM M-step below works on CDotFeatures.
REQUIRE(dotdata, "dynamic cast from CFeatures to CDotFeatures returned NULL\n")
// Alpha-weighted mean: accumulate responsibilities, then normalize.
for (int32_t i=0; i<len; i++)
{
	alpha_k_sum+=alpha_k[i];
	// ... (elided: alpha-weighted accumulation of the i-th feature vector into mean)
}

for (int32_t i=0; i<num_dim; i++)
	mean[i]/=alpha_k_sum;
// Allocate the covariance accumulator according to the covariance type.
if (cov_type==FULL)	// branch header reconstructed from the else-if below
{
	cov_sum=SG_MALLOC(float64_t, num_dim*num_dim);
	memset(cov_sum, 0, num_dim*num_dim*sizeof(float64_t));
}
else if (cov_type==DIAG)
{
	// ... (elided: allocation of num_dim entries)
	memset(cov_sum, 0, num_dim*sizeof(float64_t));
}
// ... (elided: spherical case)
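// Note: the three covariance types use differently sized accumulators -- num_dim*num_dim
// values for FULL, num_dim values for DIAG, and (as the cov_sum[0] accesses below suggest)
// a single pooled value for the spherical case.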
// Alpha-weighted covariance: accumulate over samples, then normalize (the switch skeleton
// around the excerpted lines is reconstructed).
for (int32_t j=0; j<len; j++)
{
	// ... (elided: fetch the j-th feature vector v and subtract mean)
	switch (cov_type)
	{
		case FULL:	// rank-1 update: cov_sum += alpha_k[j]*v*v^T
			cblas_dger(CblasRowMajor, num_dim, num_dim, alpha_k[j], v.vector, 1, v.vector,
				1, (double*) cov_sum, num_dim);
			break;
		case DIAG:
			for (int32_t k=0; k<num_dim; k++)
				{ /* ... elided: add the alpha_k[j]-weighted squared component k ... */ }
			break;
		case SPHERICAL:
			for (int32_t k=0; k<num_dim; k++)
				{ /* ... elided: squared components accumulated into temp ... */ }
			cov_sum[0]+=temp*alpha_k[j];
			break;
	}
}

for (int32_t j=0; j<num_dim*num_dim; j++)	// FULL
	cov_sum[j]/=alpha_k_sum;
// ...
for (int32_t j=0; j<num_dim; j++)	// DIAG
	cov_sum[j]/=alpha_k_sum;
// ...
cov_sum[0]/=alpha_k_sum*num_dim;	// SPHERICAL
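// Illustrative sketch (not Shogun code): the alpha-weighted mean/covariance update that the
// excerpts above implement, written with plain std::vector instead of SG_MALLOC/cblas. The
// helper name weighted_mean_cov and all variable names here are hypothetical.
#include <cstddef>
#include <vector>

// x: n samples of dimension k; alpha: n responsibilities (EM weights).
// mean[i]    = sum_j alpha[j]*x[j][i] / sum_j alpha[j]
// cov[i*k+l] = sum_j alpha[j]*(x[j][i]-mean[i])*(x[j][l]-mean[l]) / sum_j alpha[j]
static void weighted_mean_cov(const std::vector<std::vector<double> >& x,
	const std::vector<double>& alpha,
	std::vector<double>& mean, std::vector<double>& cov)
{
	const std::size_t n=x.size();
	const std::size_t k=x.empty() ? 0 : x[0].size();
	double alpha_sum=0;
	mean.assign(k, 0.0);
	cov.assign(k*k, 0.0);

	for (std::size_t j=0; j<n; j++)
	{
		alpha_sum+=alpha[j];
		for (std::size_t i=0; i<k; i++)
			mean[i]+=alpha[j]*x[j][i];
	}
	for (std::size_t i=0; i<k; i++)
		mean[i]/=alpha_sum;

	for (std::size_t j=0; j<n; j++)	// rank-1 updates, as cblas_dger does above
		for (std::size_t i=0; i<k; i++)
			for (std::size_t l=0; l<k; l++)
				cov[i*k+l]+=alpha[j]*(x[j][i]-mean[i])*(x[j][l]-mean[l]);
	for (std::size_t i=0; i<k*k; i++)
		cov[i]/=alpha_sum;
}
// End of sketch; the DIAG and SPHERICAL variants keep only the diagonal (or its pooled mean).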
// CGaussian::compute_log_PDF(): quadratic-form excerpts for the three covariance types.
// Full covariance (temp_holder presumably holds the rotated difference U*(point-mean)):
for (int32_t i=0; i<m_d.vlen; i++)
	answer+=temp_holder[i]*temp_holder[i]/m_d.vector[i];

SG_FREE(temp_holder);

// Diagonal covariance (inside a loop over i, elided here):
	answer+=difference[i]*difference[i]/m_d.vector[i];

// Spherical covariance (single pooled variance m_d.vector[0]):
	answer+=difference[i]*difference[i]/m_d.vector[0];
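// The loops above evaluate the quadratic form of the Gaussian log-density
//   log p(x) = -0.5*( k*log(2*pi) + log|Sigma| + (x-mean)^T Sigma^{-1} (x-mean) ),
// which with an eigen-factorization Sigma = U^T diag(d) U (the orientation of m_u is an
// assumption here) reduces to sum_i (U*(x-mean))_i^2 / d_i. Minimal sketch for the
// diagonal-covariance case only; the helper name diag_gaussian_log_pdf is hypothetical.
#include <cmath>
#include <cstddef>
#include <vector>

static double diag_gaussian_log_pdf(const std::vector<double>& x,
	const std::vector<double>& mean, const std::vector<double>& d)	// d: diagonal variances
{
	const double log_two_pi=std::log(2.0*std::acos(-1.0));
	double answer=0;
	for (std::size_t i=0; i<x.size(); i++)
	{
		const double diff=x[i]-mean[i];
		answer+=log_two_pi+std::log(d[i])+diff*diff/d[i];	// per-dimension contribution
	}
	return -0.5*answer;
}
// End of sketch.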
// CGaussian::get_cov(): rebuild the covariance matrix from the stored factors m_u and m_d.
SG_ERROR("Unitary matrix not set\n")
// ...
for (int32_t i=0; i<m_d.vlen; i++)
	// ... (elided: place m_d.vector[i] on the diagonal of diag_holder)

cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans,
	/* ... argument list elided in the excerpt ... */);
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
	/* ... argument list elided in the excerpt ... */);

SG_FREE(diag_holder);
SG_FREE(temp_holder);

// Diagonal/spherical types: copy m_d straight onto the diagonal of the returned matrix.
for (int32_t i=0; i<m_d.vlen; i++)
	// ... (elided)
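// Note: get_cov() evidently rebuilds the dense covariance from the stored factors -- the
// first dgemm (CblasTrans) forms U^T*diag(d), the second multiplies by U, i.e.
// Sigma = U^T diag(d) U (the exact storage orientation of m_u is an assumption). Minimal
// sketch of that reconstruction with plain loops; the helper name rebuild_cov is hypothetical.
#include <cstddef>
#include <vector>

// u: k*k row-major factor, d: k diagonal values; returns cov = u^T * diag(d) * u.
static std::vector<double> rebuild_cov(const std::vector<double>& u,
	const std::vector<double>& d, std::size_t k)
{
	std::vector<double> cov(k*k, 0.0);
	for (std::size_t i=0; i<k; i++)
		for (std::size_t j=0; j<k; j++)
			for (std::size_t r=0; r<k; r++)
				cov[i*k+j]+=u[r*k+i]*d[r]*u[r*k+j];	// (U^T D U)_{ij}
	return cov;
}
// End of sketch.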
// Parameter registration (serialization/model-selection support).
void CGaussian::register_params()
// Likely from CGaussian::decompose_cov(): iterate over the rows of the supplied covariance.
for (int32_t i=0; i<cov.num_rows; i++)
	// ... (loop body elided in the excerpt)
// CGaussian::sample(): build a factor of the covariance in r_matrix, then apply it
// to a vector of standard-normal draws.
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
	/* ... argument list elided in the excerpt ... */);
// ...
r_matrix=temp_matrix;
// ...
// samp = r_matrix * random_vec (the cblas_dgemv call head is elided in the excerpt)
	1, r_matrix, m_mean.vlen, random_vec, 1, 0, samp, 1);
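// Sampling note: the excerpts above assemble a factor R of the covariance (diag(sqrt(d)),
// rotated by m_u for the full-covariance type) and apply it to standard-normal draws;
// adding m_mean afterwards (not shown in the excerpt) yields x = mean + R*z, whose
// covariance is R*R^T. Minimal sketch with <random>; the helper name sample_gaussian
// is hypothetical.
#include <cstddef>
#include <random>
#include <vector>

// r: k*k row-major factor with r*r^T = Sigma; returns one draw mean + r*z.
static std::vector<double> sample_gaussian(const std::vector<double>& mean,
	const std::vector<double>& r, std::mt19937& rng)
{
	const std::size_t k=mean.size();
	std::normal_distribution<double> randn(0.0, 1.0);
	std::vector<double> z(k);
	std::vector<double> samp(mean);	// start from the mean
	for (std::size_t i=0; i<k; i++)
		z[i]=randn(rng);
	for (std::size_t i=0; i<k; i++)	// samp += r*z, as the dgemv above does
		for (std::size_t j=0; j<k; j++)
			samp[i]+=r[i*k+j]*z[j];
	return samp;
}
// End of sketch.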
#endif // HAVE_LAPACK