using namespace Eigen;
#define CREATE_SGVECTOR(vec, len, sg_type) \
	{ \
		if (!vec.vector || vec.vlen!=len) \
			vec=SGVector<sg_type>(len); \
	}

#define CREATE_SGMATRIX(mat, rows, cols, sg_type) \
	{ \
		if (!mat.matrix || mat.num_rows!=rows || mat.num_cols!=cols) \
			mat=SGMatrix<sg_type>(rows, cols); \
	}
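These macros lazily (re)allocate Shogun containers: storage is reused whenever the cached shape already matches, so per-sweep updates avoid reallocating temporaries. A minimal usage sketch (the names buf and n are illustrative, not from this file):

	index_t n=10;                        // illustrative size
	SGVector<float64_t> buf;             // empty: buf.vector is NULL
	CREATE_SGVECTOR(buf, n, float64_t);  // allocates a length-n vector
	CREATE_SGVECTOR(buf, n, float64_t);  // no-op: buf.vlen already equals n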
CEPInferenceMethod::CEPInferenceMethod(CKernel* kernel, CFeatures* features,
		CMeanFunction* mean, CLabels* labels, CLikelihoodModel* model)
		: CInference(kernel, features, mean, labels, model)
{
	init();
}
void CEPInferenceMethod::register_minimizer(Minimizer* minimizer)
{
	SG_WARNING("The method does not require a minimizer. "
			"The provided minimizer will not be used.\n");
}
void CEPInferenceMethod::init()
SG_SERROR("Provided inference is not of type CEPInferenceMethod!\n")
if (m_ttau.vlen!=n || m_nlZ>nlZ0)
while ((CMath::abs(m_nlZ-nlZ_old)>m_tol && sweep<m_max_sweep) ||
		sweep<m_min_sweep)
// compute the natural parameters of the cavity distribution
tau_n[i]=1.0/m_Sigma(i,i)-m_ttau[i];
nu_n[i]=m_mu[i]/m_Sigma(i,i)+mean[i]*tau_n[i]-m_tnu[i];

// convert the cavity to its mean and variance
mu_n[i]=nu_n[i]/tau_n[i];
s2_n[i]=1.0/tau_n[i];
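In EP terms these four assignments form the cavity distribution for site i by dividing the site approximation out of the posterior marginal (cf. Rasmussen and Williams, GPML, Algorithm 3.5); written out, with m the prior mean:

\[
\tau_{-i}=\frac{1}{\Sigma_{ii}}-\tilde\tau_i,\qquad
\nu_{-i}=\frac{\mu_i}{\Sigma_{ii}}+m_i\tau_{-i}-\tilde\nu_i,\qquad
\mu_{-i}=\frac{\nu_{-i}}{\tau_{-i}},\qquad
\sigma_{-i}^2=\frac{1}{\tau_{-i}}
\]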
// update the i-th site parameter tnu from the tilted moments mu and s2
m_tnu[i]=mu/s2-nu_n[i];
VectorXd eigen_si=eigen_Sigma.col(i);

// rank-1 update of the posterior covariance
eigen_Sigma=eigen_Sigma-ds2/(1.0+ds2*eigen_si(i))*eigen_si*
		eigen_si.adjoint();

// refresh the posterior mean
eigen_mu=eigen_Sigma*eigen_tnu;
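Assuming ds2 holds the change in the i-th site precision, as the surrounding loop suggests, this is the Sherman-Morrison update of the posterior covariance after a single site change, followed by refreshing the mean:

\[
\Sigma \leftarrow \Sigma-\frac{\Delta\tilde\tau_i}{1+\Delta\tilde\tau_i\,\Sigma_{ii}}\,\mathbf{s}_i\mathbf{s}_i^\top
=\left(\Sigma^{-1}+\Delta\tilde\tau_i\,\mathbf{e}_i\mathbf{e}_i^\top\right)^{-1},
\qquad \mathbf{s}_i=\Sigma_{:,i},\qquad
\boldsymbol\mu=\Sigma\tilde{\boldsymbol\nu}
\]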
if (sweep==m_max_sweep && CMath::abs(m_nlZ-nlZ_old)>m_tol)
{
	SG_ERROR("Maximum number (%d) of sweeps reached, but tolerance (%f) was "
			"not yet reached. You can manually set maximum number of sweeps "
			"or tolerance to fix this problem.\n", m_max_sweep, m_tol);
}
// solve (L^T * L) * v = tS^(1/2) * K * tnu using two triangular solves
VectorXd eigen_v=eigen_L.triangularView<Upper>().adjoint().solve(
		eigen_sttau.cwiseProduct(eigen_K*CMath::exp(m_log_scale*2.0)*
		eigen_tnu));
eigen_v=eigen_L.triangularView<Upper>().solve(eigen_v);

// alpha = tnu - tS^(1/2) * B^(-1) * tS^(1/2) * K * tnu = tnu - tS^(1/2) * v
eigen_alpha=eigen_tnu-eigen_sttau.cwiseProduct(eigen_v);
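The two triangular solves implement multiplication by B^(-1) given only the upper Cholesky factor. A self-contained sketch of the pattern (solve_chol_upper is a hypothetical helper name, not Shogun API):

#include <Eigen/Dense>
using namespace Eigen;

// Given the upper Cholesky factor U with B = U^T * U, return B^(-1) * rhs
// via two triangular solves; this is exactly the pattern used above.
VectorXd solve_chol_upper(const MatrixXd& U, const VectorXd& rhs)
{
	VectorXd x=U.triangularView<Upper>().adjoint().solve(rhs); // U^T * x = rhs
	return U.triangularView<Upper>().solve(x);                 // U * y = x
}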
// Cholesky factorization: B = tS^(1/2) * K * tS^(1/2) + I
LLT<MatrixXd> eigen_chol((eigen_sttau*eigen_sttau.adjoint()).cwiseProduct(
		eigen_K*CMath::exp(m_log_scale*2.0))+
		MatrixXd::Identity(m_L.num_rows, m_L.num_cols));

// keep the upper triangular factor
eigen_L=eigen_chol.matrixU();
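This factorizes the standard EP matrix B: the element-wise product of the outer product s s^T with K is the same as scaling K by diag(s) on both sides, so with s = sqrt(ttau):

\[
B=I_n+\tilde S^{1/2}K\tilde S^{1/2},\qquad
(\mathbf{s}\mathbf{s}^\top)\circ K=\operatorname{diag}(\mathbf{s})\,K\,\operatorname{diag}(\mathbf{s}),\qquad
\mathbf{s}=\tilde{\boldsymbol\tau}^{1/2}
\]

Eigen's LLT::matrixU() returns the upper factor U with B = U^T * U, which is what m_L stores here.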
// V = (L^T)^(-1) * tS^(1/2) * K
MatrixXd eigen_V=eigen_L.triangularView<Upper>().adjoint().solve(
		eigen_sttau.asDiagonal()*eigen_K*CMath::exp(m_log_scale*2.0));
// approximate posterior mean: mu = Sigma * tnu
eigen_mu=eigen_Sigma*eigen_tnu;
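Back-substituting shows these two steps recover the usual EP posterior moments:

\[
\Sigma=K-V^\top V=K-K\tilde S^{1/2}B^{-1}\tilde S^{1/2}K,\qquad
\boldsymbol\mu=\Sigma\tilde{\boldsymbol\nu}
\]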
// vector of cavity precisions: tau_n = 1 ./ diag(Sigma) - ttau
VectorXd eigen_tau_n=(VectorXd::Ones(m_ttau.vlen)).cwiseQuotient(
		eigen_Sigma.diagonal())-eigen_ttau;

// vector of cavity natural means: nu_n = mu ./ diag(Sigma) - tnu + m .* tau_n
VectorXd eigen_nu_n=eigen_mu.cwiseQuotient(eigen_Sigma.diagonal())-
		eigen_tnu+eigen_m.cwiseProduct(eigen_tau_n);

// cavity means: mu_n = nu_n ./ tau_n
eigen_mu_n=eigen_nu_n.cwiseQuotient(eigen_tau_n);

// cavity variances: s2_n = 1 ./ tau_n
eigen_s2_n=(VectorXd::Ones(m_ttau.vlen)).cwiseQuotient(eigen_tau_n);
// nlZ_part1 = sum(log(diag(L))) - lZ - tnu'*Sigma*tnu/2
float64_t nlZ_part1=eigen_L.diagonal().array().log().sum()-lZ-
		(eigen_tnu.adjoint()*eigen_Sigma).dot(eigen_tnu)/2.0;

// nlZ_part2 = sum(tnu.^2 ./ (tau_n+ttau))/2 - sum(log(1+ttau./tau_n))/2
float64_t nlZ_part2=(eigen_tnu.array().square()/
		(eigen_tau_n+eigen_ttau).array()).sum()/2.0-(1.0+eigen_ttau.array()/
		eigen_tau_n.array()).log().sum()/2.0;

// nlZ_part3 = -(nu_n-m.*tau_n)'*((ttau./tau_n.*(nu_n-m.*tau_n)-2*tnu)
//		./ (ttau+tau_n))/2
float64_t nlZ_part3=-(eigen_nu_n-eigen_m.cwiseProduct(eigen_tau_n)).dot(
		((eigen_ttau.array()/eigen_tau_n.array()*(eigen_nu_n.array()-
		eigen_m.array()*eigen_tau_n.array())-2*eigen_tnu.array())/
		(eigen_ttau.array()+eigen_tau_n.array())).matrix())/2.0;

// negative log marginal likelihood
m_nlZ=nlZ_part1+nlZ_part2+nlZ_part3;
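Using 2*sum(log(diag(L))) = log|B|, the three parts assemble the negative EP log marginal likelihood, matching GPML eq. (3.65) extended with an explicit prior mean m (a reconstruction from the code above; sign conventions follow the code):

\[
-\log Z_{EP}=\tfrac12\log|B|-\sum_i\log\hat Z_i
-\tfrac12\tilde{\boldsymbol\nu}^\top\Sigma\tilde{\boldsymbol\nu}
+\tfrac12\sum_i\frac{\tilde\nu_i^2}{\tau_{-i}+\tilde\tau_i}
-\tfrac12\sum_i\log\left(1+\frac{\tilde\tau_i}{\tau_{-i}}\right)
-\tfrac12\sum_i(\nu_{-i}-m_i\tau_{-i})\,
\frac{\frac{\tilde\tau_i}{\tau_{-i}}(\nu_{-i}-m_i\tau_{-i})-2\tilde\nu_i}{\tilde\tau_i+\tau_{-i}}
\]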
// solve (L^T * L) * V = diag(sqrt(ttau))
MatrixXd V=eigen_L.triangularView<Upper>().adjoint().solve(
		MatrixXd(eigen_sttau.asDiagonal()));
V=eigen_L.triangularView<Upper>().solve(V);

// F = alpha*alpha' - tS^(1/2) * B^(-1) * tS^(1/2)
eigen_F=eigen_alpha*eigen_alpha.adjoint()-eigen_sttau.asDiagonal()*V;
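F is the precomputed gradient matrix: for a symmetric kernel perturbation dK, the hyperparameter gradients below reduce to element-wise products, since tr(F dK) = sum_ij F_ij dK_ij is exactly what (eigen_F.cwiseProduct(eigen_dK)).sum() evaluates. In GPML notation (constant factors such as the 1/2 and the kernel scale are applied by surrounding lines not shown here):

\[
\frac{\partial(-\log Z_{EP})}{\partial\theta_j}
=-\tfrac12\operatorname{tr}\!\left(F\,\frac{\partial K}{\partial\theta_j}\right),\qquad
F=\boldsymbol\alpha\boldsymbol\alpha^\top-\tilde S^{1/2}B^{-1}\tilde S^{1/2}
\]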
REQUIRE(!strcmp(param->m_name, "log_scale"), "Can't compute derivative of "
		"the negative log marginal likelihood wrt %s.%s parameter\n",
		get_name(), param->m_name)
result[0]=-(eigen_F.cwiseProduct(eigen_K)).sum();
REQUIRE(param, "Param not set\n");
int64_t len=const_cast<TParameter*>(param)->m_datatype.get_num_elements();
result[i]=-(eigen_F.cwiseProduct(eigen_dK)).sum();