void CLibLinearMTL::init()
SG_ERROR("Specified features are not of type CDotFeatures\n")

if (num_vec!=num_train_labels)
	SG_ERROR("number of vectors %d does not match "
			"number of training labels %d\n", num_vec, num_train_labels);

// weight buffer: one extra slot when a bias term is used ...
training_w=SG_MALLOC(float64_t, num_feat+1);
// ... and none otherwise
training_w=SG_MALLOC(float64_t, num_feat+0);
liblinear_problem prob;

memset(training_w, 0, sizeof(float64_t)*(num_feat+1));
memset(training_w, 0, sizeof(float64_t)*(num_feat+0));
// fill prob.y with the training labels (loop body elided in this listing)
for (int32_t i=0; i<prob.l; i++)

// count positive and negative examples (loop body elided)
for (int i=0; i<prob.l; i++)

SG_INFO("%d training points %d dims\n", prob.l, prob.n)
SG_INFO("%d positives, %d negatives\n", pos, neg)

solve_l2r_l1l2_svc(&prob, epsilon, Cp, Cn);

// copy the learned weights into w
for (int32_t i=0; i<num_feat; i++)
	w[i] = training_w[i];
// GETI maps y[i] in {-1,+1} onto index 0 or 2 of diag[] / upper_bound[]
#define GETI(i) (y[i]+1)
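Read together with compute_dual_obj() further below, the solver that follows appears to work on a task-coupled variant of the LIBLINEAR SVM dual. A hedged sketch of that problem, writing M for task_similarity_matrix, \tau(i) for task_indicator_lhs[i], and D for the loss-dependent diagonal built from diag[] (none of this notation appears in the source itself):

\max_{0 \le \alpha_i \le U_i} \;\; \sum_i \alpha_i
  \;-\; \frac{1}{2} \sum_{i,j} \alpha_i \alpha_j \, y_i y_j \, M_{\tau(i)\tau(j)} \, x_i^\top x_j
  \;-\; \frac{1}{2} \sum_i D_{ii} \, \alpha_i^2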
void CLibLinearMTL::solve_l2r_l1l2_svc(
	const liblinear_problem *prob, double eps, double Cp, double Cn)

int w_size = prob->n;

double *QD = SG_MALLOC(double, l);
int *index = SG_MALLOC(int, l);

int32_t *y = SG_MALLOC(int32_t, l);

// projected-gradient extrema, used for shrinking and the stopping criterion
double PGmax_new, PGmin_new;

double diag[3] = {0.5/Cn, 0, 0.5/Cp};
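The diag[] initialiser above is the L2-loss setting of the LIBLINEAR scheme this solver follows: D_ii = 1/(2 C_{y_i}) with the alphas unbounded above. The L1-loss branch (not visible in this listing) conventionally zeroes the diagonal and caps each alpha at its C instead; a hedged summary:

\text{L2-loss: } D_{ii} = \frac{1}{2C_{y_i}},\; U_i = \infty
\qquad
\text{L1-loss: } D_{ii} = 0,\; U_i = C_{y_i}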
// reset the (w_size x num_tasks) per-task weight matrix (loop body elided)
for (int32_t k=0; k<w_size*num_tasks; k++)

// diagonal of the (regularised) Gram matrix: D_ii + x_i . x_i
QD[i] = diag[GETI(i)];
QD[i] += prob->x->dot(i, prob->x, i);
// shuffle the active set
for (i=0; i<active_size; i++)

for (s=0; s<active_size; s++)

C = upper_bound[GETI(i)];

typedef std::map<index_t, float64_t>::const_iterator map_iter;

int32_t e_i = it->first;

// accumulate the gradient contribution from every task coupled to this one
inner_sum += sim * yi * prob->x->dense_dot(i, tmp_w, n);

else if (alphas[i] == C)

if (fabs(PG) > 1.0e-12)

double alpha_old = alphas[i];
d = (alphas[i] - alpha_old)*yi;

// push the scaled example into the dense weight vector of its task
prob->x->add_to_dense_vec(d, i, tmp_w, n);

PGmax_old = PGmax_new;
PGmin_old = PGmin_new;
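Between the gradient accumulation and the weight update shown above, LIBLINEAR-style solvers perform a projected coordinate descent step on a single alpha; a hedged reconstruction of what happens here (the projection line itself is not part of this listing, and the D_ii term vanishes for L1 loss):

G = \texttt{inner\_sum} - 1 + D_{ii}\,\alpha_i, \qquad
\alpha_i \leftarrow \min\big(\max(\alpha_i - G/Q_{ii},\, 0),\, C\big), \qquad
d = (\alpha_i - \alpha_i^{\mathrm{old}})\, y_i,

after which d * x_i is added to the weight vector of task \tau(i) via add_to_dense_vec().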
SG_INFO("optimization finished, #iter = %d\n", iter)

SG_WARNING("reaching max number of iterations\nUsing -s 2 may be faster "
		"(also see liblinear FAQ)\n\n");
SG_INFO("starting to compute Primal OBJ\n")

// per-task L2 regulariser
for (int32_t i=0; i<w_size; i++)
	obj += 0.5 * w_t[i]*w_t[i];

// graph-Laplacian coupling between tasks s and t (outer task loops elided)
for (int32_t i=0; i<w_size; i++)
	obj += 0.5 * l * w_s[i]*w_t[i];

// hinge loss over all training examples (loop body elided)
for (int32_t i=0; i<num_vec; i++)

SG_INFO("DONE to compute Primal OBJ, obj=%f\n", obj)
SG_INFO("starting to compute DUAL OBJ\n")

// linear term of the dual (loop body elided)
for (int32_t i=0; i<num_vec; i++)

// quadratic term, coupled through the task similarity ts (outer task loops elided)
for (int32_t i=0; i<v_size; i++)
	obj -= 0.5 * ts * v_s[i]*v_t[i];
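Analogously, the dual objective accumulated here seems to be the following, with v_t the alpha-weighted sum of the examples of task t and M the task similarity matrix (again a hedged reading, since the loops over tasks s and t are not shown):

D(\alpha) = \sum_i \alpha_i \;-\; \frac{1}{2}\sum_{s,t} M_{st}\, v_s^\top v_t,
\qquad v_t = \sum_{i:\,\tau(i)=t} \alpha_i y_i x_i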