SVMOcas.cpp

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2007-2008 Vojtech Franc
 * Written (W) 2007-2009 Soeren Sonnenburg
 * Copyright (C) 2007-2009 Fraunhofer Institute FIRST and Max-Planck-Society
 */

#include "features/Labels.h"
#include "lib/Mathematics.h"
#include "lib/Time.h"
#include "base/Parameter.h"
#include "base/Parallel.h"
#include "classifier/LinearClassifier.h"
#include "classifier/svm/SVMOcas.h"
#include "features/DotFeatures.h"

using namespace shogun;

CSVMOcas::CSVMOcas()
: CLinearClassifier()
{
    init();
}

CSVMOcas::CSVMOcas(E_SVM_TYPE type)
: CLinearClassifier()
{
    init();
    method=type;
}

CSVMOcas::CSVMOcas(
    float64_t C, CDotFeatures* traindat, CLabels* trainlab)
: CLinearClassifier()
{
    init();
    C1=C;
    C2=C;

    set_features(traindat);
    set_labels(trainlab);
}


CSVMOcas::~CSVMOcas()
{
}
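
/* Usage sketch (comment added for orientation, not part of the original file):
 * a minimal way this classifier might be driven from user code. The helpers
 * set_epsilon() and classify() are assumptions about the surrounding Shogun
 * API of this era; only the constructor and train() are defined in this file.
 *
 *   // feats : a CDotFeatures* holding the training vectors
 *   // labs  : a CLabels* holding +1/-1 labels
 *   CSVMOcas* ocas=new CSVMOcas(1.0, feats, labs); // C=1.0
 *   ocas->set_epsilon(1e-3);       // convergence precision (see init())
 *   ocas->train();                 // runs svm_ocas_solver via train() below
 *   CLabels* out=ocas->classify(); // predictions of the trained linear model
 */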

bool CSVMOcas::train(CFeatures* data)
{
    SG_INFO("C=%f, epsilon=%f, bufsize=%d\n", get_C1(), get_epsilon(), bufsize);
    SG_DEBUG("use_bias = %i\n", get_bias_enabled());

    ASSERT(labels);
    if (data)
    {
        if (!data->has_property(FP_DOT))
            SG_ERROR("Specified features are not of type CDotFeatures\n");
        set_features((CDotFeatures*) data);
    }
    ASSERT(features);
    ASSERT(labels->is_two_class_labeling());

    int32_t num_train_labels=0;
    lab=labels->get_labels(num_train_labels);
    w_dim=features->get_dim_feature_space();
    int32_t num_vec=features->get_num_vectors();

    if (num_vec!=num_train_labels || num_vec<=0)
        SG_ERROR("Number of vectors (%d) does not match number of training labels (%d)\n", num_vec, num_train_labels);

    delete[] w;
    w=new float64_t[w_dim];
    memset(w, 0, w_dim*sizeof(float64_t));

    delete[] old_w;
    old_w=new float64_t[w_dim];
    memset(old_w, 0, w_dim*sizeof(float64_t));
    bias=0;
    old_bias=0;

    tmp_a_buf=new float64_t[w_dim];
    cp_value=new float64_t*[bufsize];
    memset(cp_value, 0, sizeof(float64_t*)*bufsize);
    cp_index=new uint32_t*[bufsize];
    memset(cp_index, 0, sizeof(uint32_t*)*bufsize);
    cp_nz_dims=new uint32_t[bufsize];
    cp_bias=new float64_t[bufsize];
    memset(cp_bias, 0, sizeof(float64_t)*bufsize);

    float64_t TolAbs=0;
    float64_t QPBound=0;
    int32_t Method=0;
    if (method == SVM_OCAS)
        Method = 1;
    ocas_return_value_T result = svm_ocas_solver( get_C1(), num_vec, get_epsilon(),
            TolAbs, QPBound, get_max_train_time(), bufsize, Method,
            &CSVMOcas::compute_W,
            &CSVMOcas::update_W,
            &CSVMOcas::add_new_cut,
            &CSVMOcas::compute_output,
            &CSVMOcas::sort,
            &CSVMOcas::print,
            this);

    SG_INFO("Ocas converged after %d iterations\n"
            "==================================\n"
            "timing statistics:\n"
            "output_time: %f s\n"
            "sort_time: %f s\n"
            "add_time: %f s\n"
            "w_time: %f s\n"
            "solver_time: %f s\n"
            "ocas_time: %f s\n\n", result.nIter, result.output_time, result.sort_time,
            result.add_time, result.w_time, result.qp_solver_time, result.ocas_time);

    delete[] tmp_a_buf;

    uint32_t num_cut_planes = result.nCutPlanes;

    SG_DEBUG("num_cut_planes=%d\n", num_cut_planes);
    for (uint32_t i=0; i<num_cut_planes; i++)
    {
        SG_DEBUG("cp_value[%d]=%p\n", i, cp_value[i]);
        delete[] cp_value[i];
        SG_DEBUG("cp_index[%d]=%p\n", i, cp_index[i]);
        delete[] cp_index[i];
    }

    delete[] cp_value;
    cp_value=NULL;
    delete[] cp_index;
    cp_index=NULL;
    delete[] cp_nz_dims;
    cp_nz_dims=NULL;
    delete[] cp_bias;
    cp_bias=NULL;

    delete[] lab;
    lab=NULL;

    delete[] old_w;
    old_w=NULL;

    return true;
}
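
/* For orientation (comment added, not in the original file): the solver call
 * above minimizes the regularized hinge-loss primal of a linear two-class SVM,
 *
 *   min_{w,b}  0.5*(w'*w + b*b) + C * sum_i max(0, 1 - y_i*(w'*x_i + b))
 *
 * where the bias b is only present when use_bias is enabled; this
 * implementation folds b into the regularizer, see the CMath::sq(bias)
 * terms in update_W() and compute_W() below.
 */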

/*----------------------------------------------------------------------------------
  sq_norm_W = sparse_update_W( t ) does the following:

  W = oldW*(1-t) + t*W;
  sq_norm_W = W'*W;

  ---------------------------------------------------------------------------------*/
float64_t CSVMOcas::update_W( float64_t t, void* ptr )
{
  float64_t sq_norm_W = 0;
  CSVMOcas* o = (CSVMOcas*) ptr;
  uint32_t nDim = (uint32_t) o->w_dim;
  float64_t* W=o->w;
  float64_t* oldW=o->old_w;

  for(uint32_t j=0; j < nDim; j++)
  {
      W[j] = oldW[j]*(1-t) + t*W[j];
      sq_norm_W += W[j]*W[j];
  }
  o->bias=o->old_bias*(1-t) + t*o->bias;
  sq_norm_W += CMath::sq(o->bias);

  return( sq_norm_W );
}
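
/* Illustrative example (not part of the solver): with nDim=2, oldW=[0,2],
 * W=[2,0], zero bias and t=0.5, the loop above yields W=[1,1] and the
 * function returns sq_norm_W = 1*1 + 1*1 = 2. */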

/*----------------------------------------------------------------------------------
  sparse_add_new_cut( new_col_H, new_cut, cut_length, nSel ) does the following:

    new_a = sum(data_X(:,find(new_cut ~=0 )),2);
    new_col_H = [sparse_A(:,1:nSel)'*new_a ; new_a'*new_a];
    sparse_A(:,nSel+1) = new_a;

  ---------------------------------------------------------------------------------*/
int CSVMOcas::add_new_cut(
    float64_t *new_col_H, uint32_t *new_cut, uint32_t cut_length,
    uint32_t nSel, void* ptr)
{
    CSVMOcas* o = (CSVMOcas*) ptr;
    CDotFeatures* f = o->features;
    uint32_t nDim=(uint32_t) o->w_dim;
    float64_t* y = o->lab;

    float64_t** c_val = o->cp_value;
    uint32_t** c_idx = o->cp_index;
    uint32_t* c_nzd = o->cp_nz_dims;
    float64_t* c_bias = o->cp_bias;

    float64_t sq_norm_a;
    uint32_t i, j, nz_dims;

    /* temporary vector */
    float64_t* new_a = o->tmp_a_buf;
    memset(new_a, 0, sizeof(float64_t)*nDim);

    for(i=0; i < cut_length; i++)
    {
        f->add_to_dense_vec(y[new_cut[i]], new_cut[i], new_a, nDim);

        if (o->use_bias)
            c_bias[nSel]+=y[new_cut[i]];
    }

    /* compute new_a'*new_a and count number of non-zero dimensions */
    nz_dims = 0;
    sq_norm_a = CMath::sq(c_bias[nSel]);
    for(j=0; j < nDim; j++ ) {
        if(new_a[j] != 0) {
            nz_dims++;
            sq_norm_a += new_a[j]*new_a[j];
        }
    }

    /* sparsify new_a and insert it into the last column of sparse_A */
    c_nzd[nSel] = nz_dims;
    c_idx[nSel]=NULL;
    c_val[nSel]=NULL;

    if(nz_dims > 0)
    {
        c_idx[nSel]=new uint32_t[nz_dims];
        c_val[nSel]=new float64_t[nz_dims];

        uint32_t idx=0;
        for(j=0; j < nDim; j++ )
        {
            if(new_a[j] != 0)
            {
                c_idx[nSel][idx] = j;
                c_val[nSel][idx++] = new_a[j];
            }
        }
    }

    new_col_H[nSel] = sq_norm_a;

    for(i=0; i < nSel; i++)
    {
        float64_t tmp = c_bias[nSel]*c_bias[i];
        for(j=0; j < c_nzd[i]; j++)
            tmp += new_a[c_idx[i][j]]*c_val[i][j];

        new_col_H[i] = tmp;
    }
    //CMath::display_vector(new_col_H, nSel+1, "new_col_H");
    //CMath::display_vector((int32_t*) c_idx[nSel], (int32_t) nz_dims, "c_idx");
    //CMath::display_vector((float64_t*) c_val[nSel], nz_dims, "c_val");
    return 0;
}
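
/* Illustrative example (not part of the solver): if the accumulated cut is
 * new_a = [0, 1.5, 0, -2], it is stored sparsely as c_idx[nSel]={1,3},
 * c_val[nSel]={1.5,-2}, c_nzd[nSel]=2, so the dot products computed here and
 * in compute_W() only touch the non-zero dimensions. */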

int CSVMOcas::sort(float64_t* vals, float64_t* data, uint32_t size)
{
    CMath::qsort_index(vals, data, size);
    return 0;
}

/*----------------------------------------------------------------------
  sparse_compute_output( output ) does the following:

  output = data_X'*W;
  ----------------------------------------------------------------------*/
int CSVMOcas::compute_output(float64_t *output, void* ptr)
{
    CSVMOcas* o = (CSVMOcas*) ptr;
    CDotFeatures* f=o->features;
    int32_t nData=f->get_num_vectors();

    float64_t* y = o->lab;

    f->dense_dot_range(output, 0, nData, y, o->w, o->w_dim, 0.0);

    for (int32_t i=0; i<nData; i++)
        output[i]+=y[i]*o->bias;
    //CMath::display_vector(o->w, o->w_dim, "w");
    //CMath::display_vector(output, nData, "out");
    return 0;
}
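
/* Note (comment added): the outputs are label-scaled scores,
 * output[i] = y[i]*(w'*x_i + bias), i.e. the margin form in which the
 * cutting-plane solver evaluates the hinge loss. */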

/*----------------------------------------------------------------------
  sq_norm_W = compute_W( alpha, nSel ) does the following:

  oldW = W;
  W = sparse_A(:,1:nSel)'*alpha;
  sq_norm_W = W'*W;
  dp_WoldW = W'*oldW;

  ----------------------------------------------------------------------*/
void CSVMOcas::compute_W(
    float64_t *sq_norm_W, float64_t *dp_WoldW, float64_t *alpha, uint32_t nSel,
    void* ptr )
{
    CSVMOcas* o = (CSVMOcas*) ptr;
    uint32_t nDim= (uint32_t) o->w_dim;
    CMath::swap(o->w, o->old_w);
    float64_t* W=o->w;
    float64_t* oldW=o->old_w;
    memset(W, 0, sizeof(float64_t)*nDim);
    float64_t old_bias=o->bias;
    float64_t bias=0;

    float64_t** c_val = o->cp_value;
    uint32_t** c_idx = o->cp_index;
    uint32_t* c_nzd = o->cp_nz_dims;
    float64_t* c_bias = o->cp_bias;

    for(uint32_t i=0; i<nSel; i++)
    {
        uint32_t nz_dims = c_nzd[i];

        if(nz_dims > 0 && alpha[i] > 0)
        {
            for(uint32_t j=0; j < nz_dims; j++)
                W[c_idx[i][j]] += alpha[i]*c_val[i][j];
        }
        bias += c_bias[i]*alpha[i];
    }

    *sq_norm_W = CMath::dot(W,W, nDim) + CMath::sq(bias);
    *dp_WoldW = CMath::dot(W,oldW, nDim) + bias*old_bias;
    //SG_PRINT("nSel=%d sq_norm_W=%f dp_WoldW=%f\n", nSel, *sq_norm_W, *dp_WoldW);

    o->bias = bias;
    o->old_bias = old_bias;
}
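
/* Illustrative example (not part of the solver): with two stored cutting
 * planes a_0=[1,0], a_1=[0,2], zero cut biases and alpha=[0.5,0.25], the
 * loop above reconstructs W = 0.5*a_0 + 0.25*a_1 = [0.5,0.5]. */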

void CSVMOcas::init()
{
    use_bias=true;
    bufsize=3000;
    C1=1;
    C2=1;

    epsilon=1e-3;
    method=SVM_OCAS;
    w=NULL;
    old_w=NULL;
    tmp_a_buf=NULL;
    lab=NULL;
    cp_value=NULL;
    cp_index=NULL;
    cp_nz_dims=NULL;
    cp_bias=NULL;

    m_parameters->add(&C1, "C1",  "Cost constant 1.");
    m_parameters->add(&C2, "C2",  "Cost constant 2.");
    m_parameters->add(&use_bias, "use_bias",
            "Indicates if bias is used.");
    m_parameters->add(&epsilon, "epsilon", "Convergence precision.");
    m_parameters->add(&bufsize, "bufsize", "Maximum number of cutting planes.");
    m_parameters->add((machine_int_t*) &method, "method",
            "SVMOcas solver type.");
}