Go to the documentation of this file.00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013 #ifndef _LIBLINEARMTL_H___
00014 #define _LIBLINEARMTL_H___
00015
#include <shogun/lib/config.h>

#include <map>
#include <vector>

#include <shogun/base/Parameter.h>
#include <shogun/lib/common.h>
#include <shogun/lib/SGSparseMatrix.h>
#include <shogun/machine/LinearMachine.h>
#include <shogun/optimization/liblinear/shogun_liblinear.h>
00024
00025
00026 namespace shogun
00027 {
00028
00029 #ifdef HAVE_LAPACK
00030
00031
00035 class MappedSparseMatrix
00036 {
00037
00038 public:
00039
00044 inline const float64_t operator()(index_t i_row, index_t i_col) const
00045 {
00046
00047
00048 std::map<index_t, float64_t>::const_iterator it = data[i_row].find(i_col);
00049
00050 if (it != data[i_row].end())
00051 {
00052
00053 return it->second;
00054 } else {
00055 return 0.0;
00056 }
00057 }
00058
00062 void set_from_sparse(const SGSparseMatrix<float64_t> &sgm)
00063 {
00064 data.clear();
00065
00066
00067 for (int32_t i=0; i!=sgm.num_vectors; i++)
00068 {
00069
00070 SGSparseVector<float64_t> ts_row = sgm.sparse_matrix[i];
00071 data.push_back(std::map<index_t, float64_t>());
00072
00073 for (int32_t k=0; k!=ts_row.num_feat_entries; k++)
00074 {
00075
00076 SGSparseVectorEntry<float64_t> e = ts_row.features[k];
00077 data[i][e.feat_index] = e.entry;
00078 }
00079
00080 }
00081 }
00082
00084 std::vector< std::map<index_t, float64_t> > data;
00085
00086 };
00087
00088
00090 class CLibLinearMTL : public CLinearMachine
00091 {
00092 public:
00094 CLibLinearMTL();
00095
00096
00103 CLibLinearMTL(
00104 float64_t C, CDotFeatures* traindat,
00105 CLabels* trainlab);
00106
00108 virtual ~CLibLinearMTL();
00109
00110
00115 virtual EMachineType get_classifier_type() { return CT_LIBLINEAR; }
00116
00122 inline void set_C(float64_t c_neg, float64_t c_pos) { C1=c_neg; C2=c_pos; }
00123
00128 inline float64_t get_C1() { return C1; }
00129
00134 inline float64_t get_C2() { return C2; }
00135
00140 inline void set_epsilon(float64_t eps) { epsilon=eps; }
00141
00146 inline float64_t get_epsilon() { return epsilon; }
00147
00152 inline void set_bias_enabled(bool enable_bias) { use_bias=enable_bias; }
00153
00158 inline bool get_bias_enabled() { return use_bias; }
00159
00161 virtual const char* get_name() const { return "LibLinearMTL"; }
00162
00164 inline int32_t get_max_iterations()
00165 {
00166 return max_iterations;
00167 }
00168
00170 inline void set_max_iterations(int32_t max_iter=1000)
00171 {
00172 max_iterations=max_iter;
00173 }
00174
00176 inline void set_num_tasks(int32_t nt)
00177 {
00178 num_tasks = nt;
00179 }
00180
00182 inline void set_linear_term(SGVector<float64_t> linear_term)
00183 {
00184 if (!m_labels)
00185 SG_ERROR("Please assign labels first!\n");
00186
00187 int32_t num_labels=m_labels->get_num_labels();
00188
00189 if (num_labels!=linear_term.vlen)
00190 {
00191 SG_ERROR("Number of labels (%d) does not match number"
00192 " of entries (%d) in linear term \n", num_labels,
00193 linear_term.vlen);
00194 }
00195
00196 m_linear_term = linear_term;
00197 }
00198
00200 inline void set_task_indicator_lhs(SGVector<int32_t> ti)
00201 {
00202 task_indicator_lhs = ti;
00203 }
00204
00206 inline void set_task_indicator_rhs(SGVector<int32_t> ti)
00207 {
00208 task_indicator_rhs = ti;
00209 }
00210
00212 inline void set_task_similarity_matrix(SGSparseMatrix<float64_t> tsm)
00213 {
00214 task_similarity_matrix.set_from_sparse(tsm);
00215 }
00216
00218 inline void set_graph_laplacian(SGMatrix<float64_t> lap)
00219 {
00220 graph_laplacian = lap;
00221 }
00222
00227 inline SGMatrix<float64_t> get_V()
00228 {
00229 return V;
00230 }
00231
00236 inline SGMatrix<float64_t> get_W()
00237 {
00238
00239 int32_t w_size = V.num_rows;
00240
00241 SGMatrix<float64_t> W = SGMatrix<float64_t>(w_size, num_tasks);
00242 for(int32_t k=0; k<w_size*num_tasks; k++)
00243 {
00244 W.matrix[k] = 0;
00245 }
00246
00247 for (int32_t s=0; s<num_tasks; s++)
00248 {
00249 float64_t* v_s = V.get_column_vector(s);
00250 for (int32_t t=0; t<num_tasks; t++)
00251 {
00252 float64_t sim_ts = task_similarity_matrix(s,t);
00253 for(int32_t i=0; i<w_size; i++)
00254 {
00255 W.matrix[t*w_size + i] += sim_ts * v_s[i];
00256 }
00257 }
00258 }
00259
00260 return W;
00261 }
00262
00267 inline SGVector<float64_t> get_alphas()
00268 {
00269 return alphas;
00270 }
00271
00276 virtual float64_t compute_primal_obj();
00277
00282 virtual float64_t compute_dual_obj();
00283
00288 virtual float64_t compute_duality_gap();
00289
00290
00291 protected:
00300 virtual bool train_machine(CFeatures* data=NULL);
00301
00302 private:
00304 void init();
00305
00306 void solve_l2r_l1l2_svc(
00307 const problem *prob, double eps, double Cp, double Cn);
00308
00309
00310 protected:
00312 float64_t C1;
00314 float64_t C2;
00316 bool use_bias;
00318 float64_t epsilon;
00320 int32_t max_iterations;
00321
00323 SGVector<float64_t> m_linear_term;
00324
00326 SGVector<float64_t> alphas;
00327
00329 int32_t num_tasks;
00330
00332 SGVector<int32_t> task_indicator_lhs;
00333
00335 SGVector<int32_t> task_indicator_rhs;
00336
00338
00339
00340 MappedSparseMatrix task_similarity_matrix;
00341
00343 SGMatrix<float64_t> graph_laplacian;
00344
00346 SGMatrix<float64_t> V;
00347
00349 float64_t duality_gap;
00350
00351 };
00352
00353 #endif //HAVE_LAPACK
00354
00355 }
00356
00357 #endif //_LIBLINEARMTL_H___