48 using namespace Eigen;
59 void CSoftMaxLikelihood::init()
62 SG_ADD(&m_num_samples,
"num_samples",
63 "Number of samples to be generated",
70 REQUIRE(lab,
"Labels are required (lab should not be NULL)\n")
72 "Labels must be type of CMulticlassLabels\n")
75 for (int32_t i=0;i<labels.vlen;i++)
76 REQUIRE(((labels[i]>-1)&&(labels[i]<func.
vlen/labels.vlen)),
77 "Labels must be between 0 and C(ie %d here). Currently labels[%d] is"
78 "%d\n",func.
vlen/labels.vlen,i,labels[i]);
84 VectorXd max_coeff=eigen_f.rowwise().maxCoeff();
85 eigen_f=eigen_f.array().colwise()-max_coeff.array();
86 VectorXd log_sum_exp=((eigen_f.array().exp()).rowwise().sum()).array().log();
87 log_sum_exp=log_sum_exp+max_coeff;
90 eigen_f=eigen_f.array().colwise()+max_coeff.array();
95 for (int32_t i=0;i<labels.vlen;i++)
96 eigen_ret(i)=eigen_f(i,labels[i]);
98 eigen_ret=eigen_ret-log_sum_exp;
107 int32_t num_cols=func.
vlen/num_rows;
111 return get_log_probability_derivative1_f(lab,f);
113 return get_log_probability_derivative2_f(f);
115 return get_log_probability_derivative3_f(f);
121 REQUIRE(lab,
"Labels are required (lab should not be NULL)\n")
124 REQUIRE(lab->get_num_labels()==func.num_rows, "Number of labels must match "
125 "number of vectors in function\n")
128 for (int32_t i=0;i<labels.vlen;i++)
129 REQUIRE(((labels[i]>-1)&&(labels[i]<func.num_cols)),
130 "Labels must be between 0 and C(ie %d here). Currently labels[%d] is"
131 "%d\n",func.num_cols,i,labels[i]);
134 memcpy(ret.vector,func.matrix,func.num_rows*func.num_cols*sizeof(
float64_t));
137 Map<
MatrixXd> eigen_ret(ret.vector,func.num_rows,func.num_cols);
140 VectorXd max_coeff=eigen_ret.rowwise().maxCoeff();
141 eigen_ret=eigen_ret.array().colwise()-max_coeff.array();
142 VectorXd log_sum_exp=((eigen_ret.array().exp()).rowwise().sum()).array().log();
143 eigen_ret=(eigen_ret.array().colwise()-log_sum_exp.array()).exp();
152 for (int32_t i=0;i<labels.vlen;i++)
155 eigen_ret=y-eigen_ret;
165 Map<MatrixXd> eigen_f(func.matrix,func.num_rows,func.num_cols);
168 VectorXd tmp=f1.rowwise().sum();
169 f1=f1.array().colwise()/tmp.array();
171 for (int32_t i=0;i<eigen_f.rows();i++)
173 eigen_ret.block(i*eigen_f.cols(),0,eigen_f.cols(),eigen_f.cols())=
174 f1.transpose().col(i)*f1.row(i);
175 VectorXd D=eigen_ret.block(i*eigen_f.cols(),0,eigen_f.cols(),eigen_f.cols())
176 .diagonal().array().sqrt();
177 eigen_ret.block(i*eigen_f.cols(),0,eigen_f.cols(),eigen_f.cols())-=
192 VectorXd tmp=f1.rowwise().sum();
193 f1=f1.array().colwise()/tmp.array();
195 for (int32_t i=0;i<func.
num_rows;i++)
197 for (int32_t c1=0;c1<func.
num_cols;c1++)
199 for (int32_t c2=0;c2<func.
num_cols;c2++)
201 for (int32_t c3=0;c3<func.
num_cols;c3++)
203 float64_t sum_temp=0;
204 if ((c1==c2) && (c2==c3))
207 sum_temp=sum_temp-f1(i,c1)*f1(i,c3);
209 sum_temp=sum_temp-f1(i,c1)*f1(i,c2);
211 sum_temp=sum_temp-f1(i,c1)*f1(i,c2);
212 sum_temp+=2.0*f1(i,c1)*f1(i,c2)*f1(i,c3);
226 REQUIRE(num_samples>0,
"Numer of samples (%d) should be positive\n",
228 m_num_samples=num_samples;
237 REQUIRE(n*C==mu.
vlen,
"Number of labels (%d) times number of classes (%d) must match "
238 "number of elements(%d) in mu\n", n, C, mu.
vlen);
240 REQUIRE(n*C*C==s2.
vlen,
"Number of labels (%d) times second power of number of classes (%d*%d) must match "
241 "number of elements(%d) in s2\n",n, C, C, s2.
vlen);
248 "Labels must be type of CMulticlassLabels\n");
251 REQUIRE(n==n1,
"Number of samples (%d) learned from mu and s2 must match "
252 "number of labels(%d) in lab\n",n,n1);
256 REQUIRE(y[i]<C,
"Labels must be between 0 and C(ie %d here). Currently lab[%d] is"
267 for(
index_t idx=0; idx<n; idx++)
279 label.set_const(1.0);
285 eigen_ret_sub=eigen_tmp;
290 eigen_ret_sub=eigen_ret_sub.array()*eigen_label.array()+(1-eigen_ret_sub.array())*(1-eigen_label.array());
297 eigen_ret=eigen_ret.array()*(1-eigen_ret.array());
318 MatrixXd my_samples=eigen_samples.array().exp();
319 VectorXd sum_samples=my_samples.array().colwise().sum().transpose();
320 MatrixXd normal_samples=(my_samples.array().rowwise()/sum_samples.array().transpose());
321 VectorXd mean_samples=normal_samples.rowwise().mean();
328 eigen_est=(mean_samples.array()*eigen_y.array())+(1-mean_samples.array())*(1-eigen_y.array());
339 return predictive_helper(mu, s2, lab,
MC_Mean);
345 return predictive_helper(mu, s2, lab,
MC_Variance);
virtual SGVector< float64_t > get_log_probability_f(const CLabels *lab, SGVector< float64_t > func) const
virtual void set_num_samples(index_t num_samples)
Class that models Soft-Max likelihood.
virtual ELabelType get_label_type() const =0
The class Labels models labels, i.e. class assignments of objects.
virtual int32_t get_num_labels() const =0
multi-class labels 0,1,...
virtual SGVector< float64_t > get_log_probability_derivative_f(const CLabels *lab, SGVector< float64_t > func, index_t i) const
Multiclass Labels for multi-class classification.
virtual ~CSoftMaxLikelihood()
Matrix< float64_t,-1,-1, 0,-1,-1 > MatrixXd
virtual SGVector< float64_t > get_predictive_log_probabilities(SGVector< float64_t > mu, SGVector< float64_t > s2, const CLabels *lab=NULL)
All classes and functions are contained in the shogun namespace.
Dense version of the well-known Gaussian probability distribution, defined as \( \mathcal{N}(x \mid \mu, \Sigma) = \frac{1}{(2\pi)^{d/2}\,|\Sigma|^{1/2}} \exp\!\left(-\tfrac{1}{2}(x-\mu)^{\top}\Sigma^{-1}(x-\mu)\right) \).
The Likelihood model base class.
static int32_t pow(bool x, int32_t n)
void set_const(T const_elem)
virtual SGVector< float64_t > get_predictive_means(SGVector< float64_t > mu, SGVector< float64_t > s2, const CLabels *lab=NULL) const
virtual SGVector< float64_t > get_predictive_variances(SGVector< float64_t > mu, SGVector< float64_t > s2, const CLabels *lab=NULL) const
virtual SGMatrix< float64_t > sample(int32_t num_samples, SGMatrix< float64_t > pre_samples=SGMatrix< float64_t >()) const