for (int32_t i=0; i<m_d.vlen; i++)

SG_ERROR("Specified features are not of type CDotFeatures\n")

REQUIRE(dotdata, "dynamic cast from CFeatures to CDotFeatures returned NULL\n")
for (int32_t i=0; i<len; i++)
    alpha_k_sum+=alpha_k[i];

// normalise the weighted mean by the sum of the weights alpha_k
for (int32_t i=0; i<num_dim; i++)
    mean[i]/=alpha_k_sum;
cov_sum=SG_MALLOC(float64_t, num_dim*num_dim);
memset(cov_sum, 0, num_dim*num_dim*sizeof(float64_t));

else if (cov_type==DIAG)

memset(cov_sum, 0, num_dim*sizeof(float64_t));
for (int32_t j=0; j<len; j++)

// rank-1 update: cov_sum += alpha_k[j] * v * v^T
cblas_dger(CblasRowMajor, num_dim, num_dim, alpha_k[j], v.vector, 1, v.vector,
           1, (double*) cov_sum, num_dim);
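The cblas_dger call is the standard BLAS rank-1 update A := alpha*x*y^T + A; applied per example it accumulates the weighted outer products from which the covariance estimate is built. A minimal stand-alone sketch of that accumulation with made-up data (assumes a CBLAS implementation such as OpenBLAS is linked in):

#include <cblas.h>
#include <vector>

int main()
{
    const int num_dim=2;
    // two feature vectors with weights 0.25 and 0.75 (illustrative values)
    double v0[num_dim]={1.0, 2.0}, v1[num_dim]={3.0, 1.0};
    double alpha[2]={0.25, 0.75};

    std::vector<double> cov_sum(num_dim*num_dim, 0.0);

    // cov_sum += alpha[j] * v_j * v_j^T for every example j
    cblas_dger(CblasRowMajor, num_dim, num_dim, alpha[0], v0, 1, v0, 1, cov_sum.data(), num_dim);
    cblas_dger(CblasRowMajor, num_dim, num_dim, alpha[1], v1, 1, v1, 1, cov_sum.data(), num_dim);

    return 0;
}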
for (int32_t k=0; k<num_dim; k++)

for (int32_t k=0; k<num_dim; k++)

cov_sum[0]+=temp*alpha_k[j];

// normalise the accumulated covariance by the sum of the weights
for (int32_t j=0; j<num_dim*num_dim; j++)
    cov_sum[j]/=alpha_k_sum;

for (int32_t j=0; j<num_dim; j++)
    cov_sum[j]/=alpha_k_sum;

cov_sum[0]/=alpha_k_sum*num_dim;
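Taken together, these fragments compute the usual weighted estimates used in an EM M-step: mu = sum_j alpha_j x_j / sum_j alpha_j and a covariance formed from weighted outer products divided by the same weight sum, stored as a full matrix, a diagonal, or a single spherical value. A plain C++ sketch of the same computation for the diagonal case, without the Shogun/BLAS plumbing (names and helper are hypothetical):

#include <cstddef>
#include <vector>

// weighted mean and diagonal covariance of num_dim-dimensional points x[j]
// with non-negative weights alpha[j] (x assumed non-empty)
void weighted_gaussian_fit_diag(const std::vector<std::vector<double>>& x,
                                const std::vector<double>& alpha,
                                std::vector<double>& mean,
                                std::vector<double>& diag_cov)
{
    const std::size_t num_dim=x.front().size();
    double alpha_sum=0.0;
    mean.assign(num_dim, 0.0);
    diag_cov.assign(num_dim, 0.0);

    // weighted mean
    for (std::size_t j=0; j<x.size(); ++j)
    {
        alpha_sum+=alpha[j];
        for (std::size_t i=0; i<num_dim; ++i)
            mean[i]+=alpha[j]*x[j][i];
    }
    for (std::size_t i=0; i<num_dim; ++i)
        mean[i]/=alpha_sum;

    // weighted per-dimension variance of the centred points
    for (std::size_t j=0; j<x.size(); ++j)
        for (std::size_t i=0; i<num_dim; ++i)
        {
            const double d=x[j][i]-mean[i];
            diag_cov[i]+=alpha[j]*d*d;
        }
    for (std::size_t i=0; i<num_dim; ++i)
        diag_cov[i]/=alpha_sum;
}

With equal weights this reduces to the ordinary sample mean and the (biased) per-dimension variance.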
for (int32_t i=0; i<m_d.vlen; i++)
    answer+=temp_holder[i]*temp_holder[i]/m_d.vector[i];

SG_FREE(temp_holder);
answer+=difference[i]*difference[i]/m_d.vector[i];

answer+=difference[i]*difference[i]/m_d.vector[0];
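These accumulations are the Mahalanobis-style term sum_i (x_i - mu_i)^2 / d_i of the log-density, with a per-dimension variance d_i in the first branch and a single shared variance d_0 in the second. For reference, a minimal stand-alone log-density for the diagonal case, log N(x; mu, diag(d)) = -0.5 * (D*log(2*pi) + sum_i log d_i + sum_i (x_i - mu_i)^2 / d_i), written with hypothetical names rather than the class's own API:

#include <cmath>
#include <cstddef>
#include <vector>

// log N(x; mu, diag(d)) for a diagonal-covariance Gaussian
double log_pdf_diag(const std::vector<double>& x,
                    const std::vector<double>& mu,
                    const std::vector<double>& d)
{
    constexpr double kTwoPi=6.283185307179586;
    double answer=x.size()*std::log(kTwoPi);
    for (std::size_t i=0; i<x.size(); ++i)
    {
        const double diff=x[i]-mu[i];
        answer+=std::log(d[i])+diff*diff/d[i];
    }
    return -0.5*answer;
}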
SG_ERROR("Unitary matrix not set\n")

for (int32_t i=0; i<m_d.vlen; i++)
cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans,

cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,

SG_FREE(diag_holder);
SG_FREE(temp_holder);

for (int32_t i=0; i<m_d.vlen; i++)
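The two cblas_dgemm calls are truncated here, but together with the diag_holder/temp_holder buffers they belong to the branch that rebuilds a full covariance matrix from the stored factorisation. Assuming the decomposition cov = U^T * diag(d) * U held in m_u and m_d, a minimal stand-alone sketch of that reconstruction could look like this (hypothetical names; CBLAS assumed available):

#include <cblas.h>
#include <vector>

// rebuild cov = U^T * diag(d) * U (row-major, n x n), assuming U stores the
// eigenvectors row-wise and d the corresponding eigenvalues
std::vector<double> reconstruct_cov(const std::vector<double>& U,
                                    const std::vector<double>& d, int n)
{
    std::vector<double> diag(n*n, 0.0), temp(n*n, 0.0), cov(n*n, 0.0);
    for (int i=0; i<n; i++)
        diag[i*n+i]=d[i];

    // temp = U^T * diag(d)
    cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans, n, n, n,
                1.0, U.data(), n, diag.data(), n, 0.0, temp.data(), n);
    // cov = temp * U
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, n, n, n,
                1.0, temp.data(), n, U.data(), n, 0.0, cov.data(), n);
    return cov;
}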
void CGaussian::register_params()
for (int32_t i=0; i<cov.num_rows; i++)
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,

r_matrix=temp_matrix;

1, r_matrix, m_mean.vlen, random_vec, 1, 0, samp, 1);

#endif // HAVE_LAPACK
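The final fragments come from the sampling path: a transformation matrix r_matrix is prepared (the dgemm call is truncated), and the visible argument tail applies it to a vector of standard-normal draws to produce the sample samp. A minimal stand-alone sketch of the underlying idea, x = mu + R*z with z ~ N(0, I) and R any factor satisfying R*R^T = cov (for example R = U^T * diag(sqrt(d))), in plain C++ with hypothetical names:

#include <cstddef>
#include <random>
#include <vector>

// draw one sample x = mu + R * z, where z is standard normal and
// R (row-major n x n) satisfies R * R^T = desired covariance
std::vector<double> sample_gaussian(const std::vector<double>& mu,
                                    const std::vector<double>& R,
                                    std::mt19937& rng)
{
    const std::size_t n=mu.size();
    std::normal_distribution<double> randn(0.0, 1.0);

    std::vector<double> z(n), x(mu);
    for (std::size_t i=0; i<n; ++i)
        z[i]=randn(rng);

    // x = mu + R * z
    for (std::size_t i=0; i<n; ++i)
        for (std::size_t j=0; j<n; ++j)
            x[i]+=R[i*n+j]*z[j];
    return x;
}

With R taken as the identity matrix this reduces to sampling from N(mu, I).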