49 using namespace Eigen;
60 void CSoftMaxLikelihood::init()
63 SG_ADD(&m_num_samples,
"num_samples",
64 "Number of samples to be generated",
71 REQUIRE(lab,
"Labels are required (lab should not be NULL)\n")
73 "Labels must be type of CMulticlassLabels\n")
76 for (int32_t i=0;i<labels.vlen;i++)
77 REQUIRE(((labels[i]>-1)&&(labels[i]<func.
vlen/labels.vlen)),
78 "Labels must be between 0 and C(ie %d here). Currently labels[%d] is"
79 "%d\n",func.
vlen/labels.vlen,i,labels[i]);
85 VectorXd max_coeff=eigen_f.rowwise().maxCoeff();
86 eigen_f=eigen_f.array().colwise()-max_coeff.array();
87 VectorXd log_sum_exp=((eigen_f.array().exp()).rowwise().sum()).array().log();
88 log_sum_exp=log_sum_exp+max_coeff;
91 eigen_f=eigen_f.array().colwise()+max_coeff.array();
96 for (int32_t i=0;i<labels.vlen;i++)
97 eigen_ret(i)=eigen_f(i,labels[i]);
99 eigen_ret=eigen_ret-log_sum_exp;
108 int32_t num_cols=func.
vlen/num_rows;
112 return get_log_probability_derivative1_f(lab,f);
114 return get_log_probability_derivative2_f(f);
116 return get_log_probability_derivative3_f(f);
122 REQUIRE(lab,
"Labels are required (lab should not be NULL)\n")
125 REQUIRE(lab->get_num_labels()==func.num_rows, "Number of labels must match "
126 "number of vectors in function\n")
129 for (int32_t i=0;i<labels.vlen;i++)
130 REQUIRE(((labels[i]>-1)&&(labels[i]<func.num_cols)),
131 "Labels must be between 0 and C(ie %d here). Currently labels[%d] is"
132 "%d\n",func.num_cols,i,labels[i]);
135 memcpy(ret.vector,func.matrix,func.num_rows*func.num_cols*sizeof(
float64_t));
138 Map<
MatrixXd> eigen_ret(ret.vector,func.num_rows,func.num_cols);
141 VectorXd max_coeff=eigen_ret.rowwise().maxCoeff();
142 eigen_ret=eigen_ret.array().colwise()-max_coeff.array();
143 VectorXd log_sum_exp=((eigen_ret.array().exp()).rowwise().sum()).array().log();
144 eigen_ret=(eigen_ret.array().colwise()-log_sum_exp.array()).exp();
153 for (int32_t i=0;i<labels.vlen;i++)
156 eigen_ret=y-eigen_ret;
166 Map<MatrixXd> eigen_f(func.matrix,func.num_rows,func.num_cols);
169 VectorXd tmp=f1.rowwise().sum();
170 f1=f1.array().colwise()/tmp.array();
172 for (int32_t i=0;i<eigen_f.rows();i++)
174 eigen_ret.block(i*eigen_f.cols(),0,eigen_f.cols(),eigen_f.cols())=
175 f1.transpose().col(i)*f1.row(i);
176 VectorXd D=eigen_ret.block(i*eigen_f.cols(),0,eigen_f.cols(),eigen_f.cols())
177 .diagonal().array().sqrt();
178 eigen_ret.block(i*eigen_f.cols(),0,eigen_f.cols(),eigen_f.cols())-=
193 VectorXd tmp=f1.rowwise().sum();
194 f1=f1.array().colwise()/tmp.array();
196 for (int32_t i=0;i<func.
num_rows;i++)
198 for (int32_t c1=0;c1<func.
num_cols;c1++)
200 for (int32_t c2=0;c2<func.
num_cols;c2++)
202 for (int32_t c3=0;c3<func.
num_cols;c3++)
204 float64_t sum_temp=0;
205 if ((c1==c2) && (c2==c3))
208 sum_temp=sum_temp-f1(i,c1)*f1(i,c3);
210 sum_temp=sum_temp-f1(i,c1)*f1(i,c2);
212 sum_temp=sum_temp-f1(i,c1)*f1(i,c2);
213 sum_temp+=2.0*f1(i,c1)*f1(i,c2)*f1(i,c3);
227 REQUIRE(num_samples>0,
"Numer of samples (%d) should be positive\n",
229 m_num_samples=num_samples;
238 REQUIRE(n*C==mu.
vlen,
"Number of labels (%d) times number of classes (%d) must match "
239 "number of elements(%d) in mu\n", n, C, mu.
vlen);
241 REQUIRE(n*C*C==s2.
vlen,
"Number of labels (%d) times second power of number of classes (%d*%d) must match "
242 "number of elements(%d) in s2\n",n, C, C, s2.
vlen);
249 "Labels must be type of CMulticlassLabels\n");
252 REQUIRE(n==n1,
"Number of samples (%d) learned from mu and s2 must match "
253 "number of labels(%d) in lab\n",n,n1);
257 REQUIRE(y[i]<C,
"Labels must be between 0 and C(ie %d here). Currently lab[%d] is"
268 for(
index_t idx=0; idx<n; idx++)
280 label.set_const(1.0);
286 eigen_ret_sub=eigen_tmp;
291 eigen_ret_sub=eigen_ret_sub.array()*eigen_label.array()+(1-eigen_ret_sub.array())*(1-eigen_label.array());
298 eigen_ret=eigen_ret.array()*(1-eigen_ret.array());
319 MatrixXd my_samples=eigen_samples.array().exp();
320 VectorXd sum_samples=my_samples.array().colwise().sum().transpose();
321 MatrixXd normal_samples=(my_samples.array().rowwise()/sum_samples.array().transpose());
322 VectorXd mean_samples=normal_samples.rowwise().mean();
329 eigen_est=(mean_samples.array()*eigen_y.array())+(1-mean_samples.array())*(1-eigen_y.array());
340 return predictive_helper(mu, s2, lab,
MC_Mean);
346 return predictive_helper(mu, s2, lab,
MC_Variance);
virtual SGVector< float64_t > get_log_probability_f(const CLabels *lab, SGVector< float64_t > func) const
virtual void set_num_samples(index_t num_samples)
Class that models Soft-Max likelihood.
virtual ELabelType get_label_type() const =0
The class Labels models labels, i.e. class assignments of objects.
virtual int32_t get_num_labels() const =0
multi-class labels 0,1,...
virtual SGVector< float64_t > get_log_probability_derivative_f(const CLabels *lab, SGVector< float64_t > func, index_t i) const
Multiclass Labels for multi-class classification.
virtual ~CSoftMaxLikelihood()
Matrix< float64_t,-1,-1, 0,-1,-1 > MatrixXd
virtual SGVector< float64_t > get_predictive_log_probabilities(SGVector< float64_t > mu, SGVector< float64_t > s2, const CLabels *lab=NULL)
All classes and functions are contained in the shogun namespace.
Dense version of the well-known Gaussian probability distribution, defined as \( \mathcal{N}(x;\mu,\Sigma) = \frac{1}{\sqrt{(2\pi)^d |\Sigma|}} \exp\!\left(-\tfrac{1}{2}(x-\mu)^\top \Sigma^{-1} (x-\mu)\right) \).
The Likelihood model base class.
static int32_t pow(bool x, int32_t n)
void set_const(T const_elem)
virtual SGVector< float64_t > get_predictive_means(SGVector< float64_t > mu, SGVector< float64_t > s2, const CLabels *lab=NULL) const
virtual SGVector< float64_t > get_predictive_variances(SGVector< float64_t > mu, SGVector< float64_t > s2, const CLabels *lab=NULL) const
virtual SGMatrix< float64_t > sample(int32_t num_samples, SGMatrix< float64_t > pre_samples=SGMatrix< float64_t >()) const