WDSVMOcas.cpp

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2007-2008 Vojtech Franc
 * Written (W) 2007-2009 Soeren Sonnenburg
 * Copyright (C) 2007-2009 Fraunhofer Institute FIRST and Max-Planck-Society
 */

#include "features/Labels.h"
#include "lib/Mathematics.h"
#include "lib/DynamicArray.h"
#include "lib/Time.h"
#include "base/Parallel.h"
#include "classifier/Classifier.h"
#include "classifier/svm/libocas.h"
#include "classifier/svm/WDSVMOcas.h"
#include "features/StringFeatures.h"
#include "features/Alphabet.h"

using namespace shogun;

#ifndef DOXYGEN_SHOULD_SKIP_THIS
struct wdocas_thread_params_output
{
    float32_t* out;
    int32_t* val;
    float64_t* output;
    CWDSVMOcas* wdocas;
    int32_t start;
    int32_t end;
};

struct wdocas_thread_params_add
{
    CWDSVMOcas* wdocas;
    float32_t* new_a;
    uint32_t* new_cut;
    int32_t start;
    int32_t end;
    uint32_t cut_length;
};
#endif // DOXYGEN_SHOULD_SKIP_THIS

CWDSVMOcas::CWDSVMOcas(void)
: CClassifier(), use_bias(false), bufsize(3000), C1(1), C2(1),
    epsilon(1e-3), method(SVM_OCAS)
{
    SG_UNSTABLE("CWDSVMOcas::CWDSVMOcas(void)", "\n");

    w=NULL;
    old_w=NULL;
    features=NULL;
    degree=6;
    from_degree=40;
    wd_weights=NULL;
    w_offsets=NULL;
    normalization_const=1.0;
}

CWDSVMOcas::CWDSVMOcas(E_SVM_TYPE type)
: CClassifier(), use_bias(false), bufsize(3000), C1(1), C2(1),
    epsilon(1e-3), method(type)
{
    w=NULL;
    old_w=NULL;
    features=NULL;
    degree=6;
    from_degree=40;
    wd_weights=NULL;
    w_offsets=NULL;
    normalization_const=1.0;
}

CWDSVMOcas::CWDSVMOcas(
    float64_t C, int32_t d, int32_t from_d, CStringFeatures<uint8_t>* traindat,
    CLabels* trainlab)
: CClassifier(), use_bias(false), bufsize(3000), C1(C), C2(C), epsilon(1e-3),
    degree(d), from_degree(from_d)
{
    w=NULL;
    old_w=NULL;
    method=SVM_OCAS;
    features=traindat;
    set_labels(trainlab);
    wd_weights=NULL;
    w_offsets=NULL;
    normalization_const=1.0;
}

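/* A minimal usage sketch; `traindat` (a CStringFeatures<uint8_t>* over the
 * RAWDNA alphabet) and `trainlab` (a two-class CLabels*) are placeholder
 * names, not objects defined in this file:
 *
 *   CWDSVMOcas* svm = new CWDSVMOcas(1.0, 6, 40, traindat, trainlab);
 *   svm->train();                    // runs the OCAS solver below
 *   CLabels* out = svm->classify();  // one label per stored example
 *   SG_UNREF(out);
 *   SG_UNREF(svm);
 */
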
CWDSVMOcas::~CWDSVMOcas()
{
}

CLabels* CWDSVMOcas::classify()
{
    set_wd_weights();
    set_normalization_const();

    if (features)
    {
        int32_t num=features->get_num_vectors();
        ASSERT(num>0);

        CLabels* output=new CLabels(num);
        SG_REF(output);

        for (int32_t i=0; i<num; i++)
            output->set_label(i, classify_example(i));

        return output;
    }

    return NULL;
}

CLabels* CWDSVMOcas::classify(CFeatures* data)
{
    if (!data)
        SG_ERROR("No features specified\n");

    if (data->get_feature_class() != C_STRING ||
            data->get_feature_type() != F_BYTE)
    {
        SG_ERROR("Features not of class string type byte\n");
    }

    set_features((CStringFeatures<uint8_t>*) data);
    return classify();
}

int32_t CWDSVMOcas::set_wd_weights()
{
    ASSERT(degree>0 && degree<=8);
    delete[] wd_weights;
    wd_weights=new float32_t[degree];
    delete[] w_offsets;
    w_offsets=new int32_t[degree];
    int32_t w_dim_single_c=0;

    for (int32_t i=0; i<degree; i++)
    {
        w_offsets[i]=CMath::pow(alphabet_size, i+1);
        wd_weights[i]=sqrt(2.0*(from_degree-i)/(from_degree*(from_degree+1)));
        w_dim_single_c+=w_offsets[i];
    }
    return w_dim_single_c;
}
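
/* The loop above uses the standard weighted degree kernel weighting,
   wd_weights[i] = sqrt(2*(K-i)/(K*(K+1))) with K = from_degree, so lower-order
   k-mers receive larger weight; for the default from_degree=40 the degree-1
   weight is sqrt(80/1640), roughly 0.22. w_offsets[i] = alphabet_size^(i+1)
   is the number of possible (i+1)-mers, i.e. the size of the block each order
   occupies within one position of w. */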

bool CWDSVMOcas::train(CFeatures* data)
{
    SG_INFO("C=%f, epsilon=%f, bufsize=%d\n", get_C1(), get_epsilon(), bufsize);

    ASSERT(labels);
    if (data)
    {
        if (data->get_feature_class() != C_STRING ||
                data->get_feature_type() != F_BYTE)
        {
            SG_ERROR("Features not of class string type byte\n");
        }
        set_features((CStringFeatures<uint8_t>*) data);
    }

    ASSERT(get_features());
    ASSERT(labels->is_two_class_labeling());
    CAlphabet* alphabet=get_features()->get_alphabet();
    ASSERT(alphabet && alphabet->get_alphabet()==RAWDNA);

    alphabet_size=alphabet->get_num_symbols();
    string_length=features->get_num_vectors();
    int32_t num_train_labels=0;
    lab=labels->get_labels(num_train_labels);

    w_dim_single_char=set_wd_weights();
    //CMath::display_vector(wd_weights, degree, "wd_weights");
    SG_DEBUG("w_dim_single_char=%d\n", w_dim_single_char);
    w_dim=string_length*w_dim_single_char;
    SG_DEBUG("cutting plane has %d dims\n", w_dim);
    num_vec=get_features()->get_max_vector_length();

    set_normalization_const();
    SG_INFO("num_vec: %d num_lab: %d\n", num_vec, num_train_labels);
    ASSERT(num_vec==num_train_labels);
    ASSERT(num_vec>0);

    delete[] w;
    w=new float32_t[w_dim];
    memset(w, 0, w_dim*sizeof(float32_t));

    delete[] old_w;
    old_w=new float32_t[w_dim];
    memset(old_w, 0, w_dim*sizeof(float32_t));
    bias=0;
    old_bias=0;

    cuts=new float32_t*[bufsize];
    memset(cuts, 0, sizeof(*cuts)*bufsize);
    cp_bias=new float64_t[bufsize];
    memset(cp_bias, 0, sizeof(float64_t)*bufsize);

    /*float64_t* tmp = new float64_t[num_vec];
    float64_t start=CTime::get_curtime();
    CMath::random_vector(w, w_dim, (float32_t) 0, (float32_t) 1000);
    compute_output(tmp, this);
    start=CTime::get_curtime()-start;
    SG_PRINT("timing:%f\n", start);
    delete[] tmp;
    exit(1);*/

    float64_t TolAbs=0;
    float64_t QPBound=0;
    uint8_t Method=0;
    if (method == SVM_OCAS)
        Method = 1;
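
    /* The solver below is driven entirely through callbacks defined in this
       file: compute_output evaluates the label-scaled outputs for all
       examples, sort orders values for the line search, add_new_cut builds a
       new cutting plane from the violating examples listed in new_cut,
       compute_W rebuilds w from the cutting-plane coefficients alpha, and
       update_W applies the convex combination W = (1-t)*oldW + t*W chosen by
       the line search. */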
    ocas_return_value_T result = svm_ocas_solver( get_C1(), num_vec, get_epsilon(),
            TolAbs, QPBound, get_max_train_time(), bufsize, Method,
            &CWDSVMOcas::compute_W,
            &CWDSVMOcas::update_W,
            &CWDSVMOcas::add_new_cut,
            &CWDSVMOcas::compute_output,
            &CWDSVMOcas::sort,
            &CWDSVMOcas::print,
            this);

    SG_INFO("Ocas Converged after %d iterations\n"
            "==================================\n"
            "timing statistics:\n"
            "output_time: %f s\n"
            "sort_time: %f s\n"
            "add_time: %f s\n"
            "w_time: %f s\n"
            "solver_time %f s\n"
            "ocas_time %f s\n\n", result.nIter, result.output_time, result.sort_time,
            result.add_time, result.w_time, result.qp_solver_time, result.ocas_time);

    for (int32_t i=bufsize-1; i>=0; i--)
        delete[] cuts[i];
    delete[] cuts;

    delete[] lab;
    lab=NULL;

    SG_UNREF(alphabet);

    return true;
}

/*----------------------------------------------------------------------------------
  sq_norm_W = sparse_update_W( t ) does the following:

  W = oldW*(1-t) + t*W;
  sq_norm_W = W'*W;

  ---------------------------------------------------------------------------------*/
float64_t CWDSVMOcas::update_W( float64_t t, void* ptr )
{
  float64_t sq_norm_W = 0;
  CWDSVMOcas* o = (CWDSVMOcas*) ptr;
  uint32_t nDim = (uint32_t) o->w_dim;
  float32_t* W=o->w;
  float32_t* oldW=o->old_w;
  float64_t bias=o->bias;
  float64_t old_bias=bias;

  for(uint32_t j=0; j <nDim; j++)
  {
      W[j] = oldW[j]*(1-t) + t*W[j];
      sq_norm_W += W[j]*W[j];
  }

  bias=old_bias*(1-t) + t*bias;
  sq_norm_W += CMath::sq(bias);

  o->bias=bias;
  o->old_bias=old_bias;

  return( sq_norm_W );
}

/*----------------------------------------------------------------------------------
  sparse_add_new_cut( new_col_H, new_cut, cut_length, nSel ) does the following:

    new_a = sum(data_X(:,find(new_cut ~=0 )),2);
    new_col_H = [sparse_A(:,1:nSel)'*new_a ; new_a'*new_a];
    sparse_A(:,nSel+1) = new_a;

  ---------------------------------------------------------------------------------*/
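
/* In the helper below, for each start position j and order k the running
   value val[i] is the base-alphabet_size index of the (k+1)-mer beginning at
   j in example new_cut[i]; new_a[offs+val[i]] is the matching coordinate of
   the new cutting plane. For instance, with alphabet_size=4 the 2-mer with
   symbols (2,1) maps to index 2*4+1=9 inside that position's degree-2 block.
   Note that the string features are stored transposed (see train() above:
   string_length=get_num_vectors(), num_vec=get_max_vector_length()), so
   get_feature_vector(j+k, ...) returns the symbols of all examples at
   sequence position j+k. */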
void* CWDSVMOcas::add_new_cut_helper( void* ptr)
{
    wdocas_thread_params_add* p = (wdocas_thread_params_add*) ptr;
    CWDSVMOcas* o = p->wdocas;
    int32_t start = p->start;
    int32_t end = p->end;
    int32_t string_length = o->string_length;
    //uint32_t nDim=(uint32_t) o->w_dim;
    uint32_t cut_length=p->cut_length;
    uint32_t* new_cut=p->new_cut;
    int32_t* w_offsets = o->w_offsets;
    float64_t* y = o->lab;
    int32_t alphabet_size = o->alphabet_size;
    float32_t* wd_weights = o->wd_weights;
    int32_t degree = o->degree;
    CStringFeatures<uint8_t>* f = o->features;
    float64_t normalization_const = o->normalization_const;

    // temporary vector
    float32_t* new_a = p->new_a;
    //float32_t* new_a = new float32_t[nDim];
    //memset(new_a, 0, sizeof(float32_t)*nDim);

    int32_t* val=new int32_t[cut_length];
    for (int32_t j=start; j<end; j++)
    {
        int32_t offs=o->w_dim_single_char*j;
        memset(val,0,sizeof(int32_t)*cut_length);
        int32_t lim=CMath::min(degree, string_length-j);
        int32_t len;

        for (int32_t k=0; k<lim; k++)
        {
            bool free_vec;
            uint8_t* vec = f->get_feature_vector(j+k, len, free_vec);
            float32_t wd = wd_weights[k]/normalization_const;

            for(uint32_t i=0; i < cut_length; i++)
            {
                val[i]=val[i]*alphabet_size + vec[new_cut[i]];
                new_a[offs+val[i]]+=wd * y[new_cut[i]];
            }
            offs+=w_offsets[k];
            f->free_feature_vector(vec, j+k, free_vec);
        }
    }

    //p->new_a=new_a;
    delete[] val;
    return NULL;
}

int CWDSVMOcas::add_new_cut(
    float64_t *new_col_H, uint32_t *new_cut, uint32_t cut_length,
    uint32_t nSel, void* ptr)
{
    CWDSVMOcas* o = (CWDSVMOcas*) ptr;
    int32_t string_length = o->string_length;
    uint32_t nDim=(uint32_t) o->w_dim;
    float32_t** cuts=o->cuts;
    float64_t* c_bias = o->cp_bias;

    uint32_t i;
    wdocas_thread_params_add* params_add=new wdocas_thread_params_add[o->parallel->get_num_threads()];
    pthread_t* threads=new pthread_t[o->parallel->get_num_threads()];
    float32_t* new_a=new float32_t[nDim];
    memset(new_a, 0, sizeof(float32_t)*nDim);

    int32_t t;
    int32_t nthreads=o->parallel->get_num_threads()-1;
    int32_t end=0;
    int32_t step= string_length/o->parallel->get_num_threads();

    if (step<1)
    {
        nthreads=string_length-1;
        step=1;
    }

    for (t=0; t<nthreads; t++)
    {
        params_add[t].wdocas=o;
        //params_add[t].new_a=NULL;
        params_add[t].new_a=new_a;
        params_add[t].new_cut=new_cut;
        params_add[t].start = step*t;
        params_add[t].end = step*(t+1);
        params_add[t].cut_length = cut_length;

        if (pthread_create(&threads[t], NULL, &CWDSVMOcas::add_new_cut_helper, (void*)&params_add[t]) != 0)
        {
            nthreads=t;
            SG_SWARNING("thread creation failed\n");
            break;
        }
        end=params_add[t].end;
    }

    params_add[t].wdocas=o;
    //params_add[t].new_a=NULL;
    params_add[t].new_a=new_a;
    params_add[t].new_cut=new_cut;
    params_add[t].start = step*t;
    params_add[t].end = string_length;
    params_add[t].cut_length = cut_length;
    add_new_cut_helper(&params_add[t]);
    //float32_t* new_a=params_add[t].new_a;

    for (t=0; t<nthreads; t++)
    {
        if (pthread_join(threads[t], NULL) != 0)
            SG_SWARNING( "pthread_join failed\n");

        //float32_t* a=params_add[t].new_a;
        //for (i=0; i<nDim; i++)
        //  new_a[i]+=a[i];
        //delete[] a;
    }

    for(i=0; i < cut_length; i++)
    {
        if (o->use_bias)
            c_bias[nSel]+=o->lab[new_cut[i]];
    }

    // insert new_a into the last column of sparse_A
    for(i=0; i < nSel; i++)
        new_col_H[i] = CMath::dot(new_a, cuts[i], nDim) + c_bias[nSel]*c_bias[i];
    new_col_H[nSel] = CMath::dot(new_a, new_a, nDim) + CMath::sq(c_bias[nSel]);

    cuts[nSel]=new_a;
    //CMath::display_vector(new_col_H, nSel+1, "new_col_H");
    //CMath::display_vector(cuts[nSel], nDim, "cut[nSel]");
    //
    delete[] threads;
    delete[] params_add;

    return 0;
}

int CWDSVMOcas::sort( float64_t* vals, float64_t* data, uint32_t size)
{
    CMath::qsort_index(vals, data, size);
    return 0;
}

/*----------------------------------------------------------------------
  sparse_compute_output( output ) does the following:

  output = data_X'*W;
  ----------------------------------------------------------------------*/
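
/* The helper below accumulates in out[i] the raw weighted degree score of
   example i, walking positions and orders with the same transposed access and
   base-alphabet_size k-mer indexing as add_new_cut_helper; output[i] is then
   the label-scaled value y[i]*(bias + out[i]/normalization_const) consumed by
   the OCAS solver. */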
void* CWDSVMOcas::compute_output_helper(void* ptr)
{
    wdocas_thread_params_output* p = (wdocas_thread_params_output*) ptr;
    CWDSVMOcas* o = p->wdocas;
    int32_t start = p->start;
    int32_t end = p->end;
    float32_t* out = p->out;
    float64_t* output = p->output;
    int32_t* val = p->val;

    CStringFeatures<uint8_t>* f=o->get_features();

    int32_t degree = o->degree;
    int32_t string_length = o->string_length;
    int32_t alphabet_size = o->alphabet_size;
    int32_t* w_offsets = o->w_offsets;
    float32_t* wd_weights = o->wd_weights;
    float32_t* w= o->w;

    float64_t* y = o->lab;
    float64_t normalization_const = o->normalization_const;


    for (int32_t j=0; j<string_length; j++)
    {
        int32_t offs=o->w_dim_single_char*j;
        for (int32_t i=start ; i<end; i++)
            val[i]=0;

        int32_t lim=CMath::min(degree, string_length-j);
        int32_t len;

        for (int32_t k=0; k<lim; k++)
        {
            bool free_vec;
            uint8_t* vec=f->get_feature_vector(j+k, len, free_vec);
            float32_t wd = wd_weights[k];

            for (int32_t i=start; i<end; i++) // quite fast 1.9s
            {
                val[i]=val[i]*alphabet_size + vec[i];
                out[i]+=wd*w[offs+val[i]];
            }

            /*for (int32_t i=0; i<nData/4; i++) // slowest 2s
            {
                uint32_t x=((uint32_t*) vec)[i];
                int32_t ii=4*i;
                val[ii]=val[ii]*alphabet_size + (x&255);
                val[ii+1]=val[ii+1]*alphabet_size + ((x>>8)&255);
                val[ii+2]=val[ii+2]*alphabet_size + ((x>>16)&255);
                val[ii+3]=val[ii+3]*alphabet_size + (x>>24);
                out[ii]+=wd*w[offs+val[ii]];
                out[ii+1]+=wd*w[offs+val[ii+1]];
                out[ii+2]+=wd*w[offs+val[ii+2]];
                out[ii+3]+=wd*w[offs+val[ii+3]];
            }*/

            /*for (int32_t i=0; i<nData>>3; i++) // fastest on 64bit: 1.5s
            {
                uint64_t x=((uint64_t*) vec)[i];
                int32_t ii=i<<3;
                val[ii]=val[ii]*alphabet_size + (x&255);
                val[ii+1]=val[ii+1]*alphabet_size + ((x>>8)&255);
                val[ii+2]=val[ii+2]*alphabet_size + ((x>>16)&255);
                val[ii+3]=val[ii+3]*alphabet_size + ((x>>24)&255);
                val[ii+4]=val[ii+4]*alphabet_size + ((x>>32)&255);
                val[ii+5]=val[ii+5]*alphabet_size + ((x>>40)&255);
                val[ii+6]=val[ii+6]*alphabet_size + ((x>>48)&255);
                val[ii+7]=val[ii+7]*alphabet_size + (x>>56);
                out[ii]+=wd*w[offs+val[ii]];
                out[ii+1]+=wd*w[offs+val[ii+1]];
                out[ii+2]+=wd*w[offs+val[ii+2]];
                out[ii+3]+=wd*w[offs+val[ii+3]];
                out[ii+4]+=wd*w[offs+val[ii+4]];
                out[ii+5]+=wd*w[offs+val[ii+5]];
                out[ii+6]+=wd*w[offs+val[ii+6]];
                out[ii+7]+=wd*w[offs+val[ii+7]];
            }*/
            offs+=w_offsets[k];
            f->free_feature_vector(vec, j+k, free_vec);
        }
    }

    for (int32_t i=start; i<end; i++)
        output[i]=y[i]*o->bias + out[i]*y[i]/normalization_const;

    //CMath::display_vector(o->w, o->w_dim, "w");
    //CMath::display_vector(output, nData, "out");
    return NULL;
}

int CWDSVMOcas::compute_output( float64_t *output, void* ptr )
{
    CWDSVMOcas* o = (CWDSVMOcas*) ptr;
    int32_t nData=o->num_vec;
    wdocas_thread_params_output* params_output=new wdocas_thread_params_output[o->parallel->get_num_threads()];
    pthread_t* threads = new pthread_t[o->parallel->get_num_threads()];

    float32_t* out=new float32_t[nData];
    int32_t* val=new int32_t[nData];
    memset(out, 0, sizeof(float32_t)*nData);

    int32_t t;
    int32_t nthreads=o->parallel->get_num_threads()-1;
    int32_t end=0;
    int32_t step= nData/o->parallel->get_num_threads();

    if (step<1)
    {
        nthreads=nData-1;
        step=1;
    }

    for (t=0; t<nthreads; t++)
    {
        params_output[t].wdocas=o;
        params_output[t].output=output;
        params_output[t].out=out;
        params_output[t].val=val;
        params_output[t].start = step*t;
        params_output[t].end = step*(t+1);

        //SG_SPRINT("t=%d start=%d end=%d output=%p\n", t, params_output[t].start, params_output[t].end, params_output[t].output);
        if (pthread_create(&threads[t], NULL, &CWDSVMOcas::compute_output_helper, (void*)&params_output[t]) != 0)
        {
            nthreads=t;
            SG_SWARNING("thread creation failed\n");
            break;
        }
        end=params_output[t].end;
    }

    params_output[t].wdocas=o;
    params_output[t].output=output;
    params_output[t].out=out;
    params_output[t].val=val;
    params_output[t].start = step*t;
    params_output[t].end = nData;
    compute_output_helper(&params_output[t]);
    //SG_SPRINT("t=%d start=%d end=%d output=%p\n", t, params_output[t].start, params_output[t].end, params_output[t].output);

    for (t=0; t<nthreads; t++)
    {
        if (pthread_join(threads[t], NULL) != 0)
            SG_SWARNING( "pthread_join failed\n");
    }
    delete[] threads;
    delete[] params_output;
    delete[] val;
    delete[] out;
    return 0;
}
/*----------------------------------------------------------------------
  sq_norm_W = compute_W( alpha, nSel ) does the following:

  oldW = W;
  W = sparse_A(:,1:nSel)'*alpha;
  sq_norm_W = W'*W;
  dp_WoldW = W'*oldW';

  ----------------------------------------------------------------------*/
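
/* compute_W below swaps w with old_w and rebuilds w from scratch as the
   alpha-weighted combination of the stored cutting planes (cuts[i], c_bias[i]);
   keeping the previous solution in old_w/old_bias is what lets update_W above
   form the convex combination selected by the OCAS line search. */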
void CWDSVMOcas::compute_W(
    float64_t *sq_norm_W, float64_t *dp_WoldW, float64_t *alpha, uint32_t nSel,
    void* ptr)
{
    CWDSVMOcas* o = (CWDSVMOcas*) ptr;
    uint32_t nDim= (uint32_t) o->w_dim;
    CMath::swap(o->w, o->old_w);
    float32_t* W=o->w;
    float32_t* oldW=o->old_w;
    float32_t** cuts=o->cuts;
    memset(W, 0, sizeof(float32_t)*nDim);
    float64_t* c_bias = o->cp_bias;
    float64_t old_bias=o->bias;
    float64_t bias=0;

    for (uint32_t i=0; i<nSel; i++)
    {
        if (alpha[i] > 0)
            CMath::vec1_plus_scalar_times_vec2(W, (float32_t) alpha[i], cuts[i], nDim);

        bias += c_bias[i]*alpha[i];
    }

    *sq_norm_W = CMath::dot(W,W, nDim) + CMath::sq(bias);
    *dp_WoldW = CMath::dot(W,oldW, nDim) + bias*old_bias;
    //SG_PRINT("nSel=%d sq_norm_W=%f dp_WoldW=%f\n", nSel, *sq_norm_W, *dp_WoldW);

    o->bias = bias;
    o->old_bias = old_bias;
}