WDSVMOcas.cpp

00001 /*
00002  * This program is free software; you can redistribute it and/or modify
00003  * it under the terms of the GNU General Public License as published by
00004  * the Free Software Foundation; either version 3 of the License, or
00005  * (at your option) any later version.
00006  *
00007  * Written (W) 2007-2008 Vojtech Franc
00008  * Written (W) 2007-2009 Soeren Sonnenburg
00009  * Copyright (C) 2007-2009 Fraunhofer Institute FIRST and Max-Planck-Society
00010  */
00011 
00012 #include <shogun/labels/Labels.h>
00013 #include <shogun/mathematics/Math.h>
00014 #include <shogun/lib/DynamicArray.h>
00015 #include <shogun/lib/Time.h>
00016 #include <shogun/base/Parallel.h>
00017 #include <shogun/machine/Machine.h>
00018 #include <shogun/lib/external/libocas.h>
00019 #include <shogun/classifier/svm/WDSVMOcas.h>
00020 #include <shogun/features/StringFeatures.h>
00021 #include <shogun/features/Alphabet.h>
00022 #include <shogun/labels/Labels.h>
00023 #include <shogun/labels/BinaryLabels.h>
00024 
00025 using namespace shogun;
00026 
00027 #ifndef DOXYGEN_SHOULD_SKIP_THIS
00028 struct wdocas_thread_params_output
00029 {
00030     float32_t* out;
00031     int32_t* val;
00032     float64_t* output;
00033     CWDSVMOcas* wdocas;
00034     int32_t start;
00035     int32_t end;
00036 };
00037 
00038 struct wdocas_thread_params_add
00039 {
00040     CWDSVMOcas* wdocas;
00041     float32_t* new_a;
00042     uint32_t* new_cut;
00043     int32_t start;
00044     int32_t end;
00045     uint32_t cut_length;
00046 };
00047 #endif // DOXYGEN_SHOULD_SKIP_THIS
00048 
00049 CWDSVMOcas::CWDSVMOcas()
00050 : CMachine(), use_bias(false), bufsize(3000), C1(1), C2(1),
00051     epsilon(1e-3), method(SVM_OCAS)
00052 {
00053     SG_UNSTABLE("CWDSVMOcas::CWDSVMOcas()", "\n");
00054 
00055     w=NULL;
00056     old_w=NULL;
00057     features=NULL;
00058     degree=6;
00059     from_degree=40;
00060     wd_weights=NULL;
00061     w_offsets=NULL;
00062     normalization_const=1.0;
00063 }
00064 
00065 CWDSVMOcas::CWDSVMOcas(E_SVM_TYPE type)
00066 : CMachine(), use_bias(false), bufsize(3000), C1(1), C2(1),
00067     epsilon(1e-3), method(type)
00068 {
00069     w=NULL;
00070     old_w=NULL;
00071     features=NULL;
00072     degree=6;
00073     from_degree=40;
00074     wd_weights=NULL;
00075     w_offsets=NULL;
00076     normalization_const=1.0;
00077 }
00078 
00079 CWDSVMOcas::CWDSVMOcas(
00080     float64_t C, int32_t d, int32_t from_d, CStringFeatures<uint8_t>* traindat,
00081     CLabels* trainlab)
00082 : CMachine(), use_bias(false), bufsize(3000), C1(C), C2(C), epsilon(1e-3),
00083     degree(d), from_degree(from_d)
00084 {
00085     w=NULL;
00086     old_w=NULL;
00087     method=SVM_OCAS;
00088     features=traindat;
00089     set_labels(trainlab);
00090     wd_weights=NULL;
00091     w_offsets=NULL;
00092     normalization_const=1.0;
00093 }
00094 
00095 
00096 CWDSVMOcas::~CWDSVMOcas()
00097 {
00098 }
00099 
00100 CBinaryLabels* CWDSVMOcas::apply_binary(CFeatures* data)
00101 {
00102     SGVector<float64_t> outputs = apply_get_outputs(data);
00103     return new CBinaryLabels(outputs);
00104 }
00105 
00106 CRegressionLabels* CWDSVMOcas::apply_regression(CFeatures* data)
00107 {
00108     SGVector<float64_t> outputs = apply_get_outputs(data);
00109     return new CRegressionLabels(outputs);
00110 }
00111 
00112 SGVector<float64_t> CWDSVMOcas::apply_get_outputs(CFeatures* data)
00113 {
00114     if (data)
00115     {
00116         if (data->get_feature_class() != C_STRING ||
00117                 data->get_feature_type() != F_BYTE)
00118         {
00119             SG_ERROR("Features not of class string type byte\n");
00120         }
00121 
00122         set_features((CStringFeatures<uint8_t>*) data);
00123     }
00124     ASSERT(features);
00125 
00126     set_wd_weights();
00127     set_normalization_const();
00128 
00129     SGVector<float64_t> outputs;
00130     if (features)
00131     {
00132         int32_t num=features->get_num_vectors();
00133         ASSERT(num>0);
00134 
00135         outputs = SGVector<float64_t>(num);
00136 
00137         for (int32_t i=0; i<num; i++)
00138             outputs[i] = apply_one(i);
00139     }
00140 
00141     return outputs;
00142 }
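
/* A minimal usage sketch (not from the original sources), using only entry points that
 * appear in this file: train() dispatches to train_machine() below, apply_binary() to
 * apply_get_outputs() above. The feature/label construction is an assumption and may
 * differ between Shogun versions; `strings`, `labs` and `testdat` stand for data
 * prepared elsewhere.
 *
 *   CStringFeatures<uint8_t>* traindat = new CStringFeatures<uint8_t>(strings, RAWDNA);
 *   CBinaryLabels* trainlab = new CBinaryLabels(labs);                 // +1/-1 labels
 *   CWDSVMOcas* svm = new CWDSVMOcas(1.0, 6, 40, traindat, trainlab);  // C, degree, from_degree
 *   svm->train();
 *   CBinaryLabels* pred = svm->apply_binary(testdat);
 *   SG_UNREF(pred);
 *   SG_UNREF(svm);
 */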
00143 
00144 int32_t CWDSVMOcas::set_wd_weights()
00145 {
00146     ASSERT(degree>0 && degree<=8);
00147     SG_FREE(wd_weights);
00148     wd_weights=SG_MALLOC(float32_t, degree);
00149     SG_FREE(w_offsets);
00150     w_offsets=SG_MALLOC(int32_t, degree);
00151     int32_t w_dim_single_c=0;
00152 
00153     for (int32_t i=0; i<degree; i++)
00154     {
00155         w_offsets[i]=CMath::pow(alphabet_size, i+1);
00156         wd_weights[i]=sqrt(2.0*(from_degree-i)/(from_degree*(from_degree+1)));
00157         w_dim_single_c+=w_offsets[i];
00158     }
00159     return w_dim_single_c;
00160 }
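
/* Worked example for the weights chosen above (a sketch, not original text): for RAWDNA
 * the alphabet has 4 symbols, so with degree=3 the per-order offsets are
 * CMath::pow(4, k+1) = {4, 16, 64} and w_dim_single_c = 4+16+64 = 84, i.e. one weight
 * for every 1-mer, 2-mer and 3-mer starting at a given position. The k-mer weights
 *
 *     wd_weights[k] = sqrt( 2*(from_degree-k) / (from_degree*(from_degree+1)) )
 *
 * decrease with k, mirroring the weighting of the weighted degree kernel; the square
 * root is presumably taken because the weight multiplies the explicit feature map, so
 * the induced kernel carries the squared value.
 */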
00161 
00162 bool CWDSVMOcas::train_machine(CFeatures* data)
00163 {
00164     SG_INFO("C=%f, epsilon=%f, bufsize=%d\n", get_C1(), get_epsilon(), bufsize);
00165 
00166     ASSERT(m_labels);
00167     ASSERT(m_labels->get_label_type() == LT_BINARY);
00168     if (data)
00169     {
00170         if (data->get_feature_class() != C_STRING ||
00171                 data->get_feature_type() != F_BYTE)
00172         {
00173             SG_ERROR("Features not of class string type byte\n");
00174         }
00175         set_features((CStringFeatures<uint8_t>*) data);
00176     }
00177 
00178     ASSERT(get_features());
00179     CAlphabet* alphabet=get_features()->get_alphabet();
00180     ASSERT(alphabet && alphabet->get_alphabet()==RAWDNA);
00181 
00182     alphabet_size=alphabet->get_num_symbols();
00183     string_length=features->get_num_vectors();
00184     SGVector<float64_t> labvec=((CBinaryLabels*) m_labels)->get_labels();
00185     lab=labvec.vector;
00186 
00187     w_dim_single_char=set_wd_weights();
00188     //CMath::display_vector(wd_weights, degree, "wd_weights");
00189     SG_DEBUG("w_dim_single_char=%d\n", w_dim_single_char);
00190     w_dim=string_length*w_dim_single_char;
00191     SG_DEBUG("cutting plane has %d dims\n", w_dim);
00192     num_vec=get_features()->get_max_vector_length();
00193 
00194     set_normalization_const();
00195     SG_INFO("num_vec: %d num_lab: %d\n", num_vec, labvec.vlen);
00196     ASSERT(num_vec==labvec.vlen);
00197     ASSERT(num_vec>0);
00198 
00199     SG_FREE(w);
00200     w=SG_MALLOC(float32_t, w_dim);
00201     memset(w, 0, w_dim*sizeof(float32_t));
00202 
00203     SG_FREE(old_w);
00204     old_w=SG_MALLOC(float32_t, w_dim);
00205     memset(old_w, 0, w_dim*sizeof(float32_t));
00206     bias=0;
00207     old_bias=0;
00208 
00209     cuts=SG_MALLOC(float32_t*, bufsize);
00210     memset(cuts, 0, sizeof(*cuts)*bufsize);
00211     cp_bias=SG_MALLOC(float64_t, bufsize);
00212     memset(cp_bias, 0, sizeof(float64_t)*bufsize);
00213 
00215     /*float64_t* tmp = SG_MALLOC(float64_t, num_vec);
00216     float64_t start=CTime::get_curtime();
00217     CMath::random_vector(w, w_dim, (float32_t) 0, (float32_t) 1000);
00218     compute_output(tmp, this);
00219     start=CTime::get_curtime()-start;
00220     SG_PRINT("timing:%f\n", start);
00221     SG_FREE(tmp);
00222     exit(1);*/
00224     float64_t TolAbs=0;
00225     float64_t QPBound=0;
00226     uint8_t Method=0;
00227     if (method == SVM_OCAS)
00228         Method = 1;
00229     ocas_return_value_T result = svm_ocas_solver( get_C1(), num_vec, get_epsilon(),
00230             TolAbs, QPBound, get_max_train_time(), bufsize, Method,
00231             &CWDSVMOcas::compute_W,
00232             &CWDSVMOcas::update_W,
00233             &CWDSVMOcas::add_new_cut,
00234             &CWDSVMOcas::compute_output,
00235             &CWDSVMOcas::sort,
00236             &CWDSVMOcas::print,
00237             this);
00238 
00239     SG_INFO("Ocas Converged after %d iterations\n"
00240             "==================================\n"
00241             "timing statistics:\n"
00242             "output_time: %f s\n"
00243             "sort_time: %f s\n"
00244             "add_time: %f s\n"
00245             "w_time: %f s\n"
00246             "solver_time %f s\n"
00247             "ocas_time %f s\n\n", result.nIter, result.output_time, result.sort_time,
00248             result.add_time, result.w_time, result.qp_solver_time, result.ocas_time);
00249 
00250     for (int32_t i=bufsize-1; i>=0; i--)
00251         SG_FREE(cuts[i]);
00252     SG_FREE(cuts);
00253 
00254     lab=NULL;
00255     SG_UNREF(alphabet);
00256 
00257     return true;
00258 }
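
/* Context for the solver call above (a sketch of the usual OCAS setup, not text from
 * libocas): svm_ocas_solver minimizes the regularized risk
 *
 *     F(w) = 0.5*||w||^2 + C * sum_i max(0, 1 - y_i*<w, phi(x_i)>),
 *
 * where phi is the position-wise weighted degree (k-mer) feature map built below, by
 * maintaining a cutting-plane model of the risk term. The callbacks passed in divide
 * the work as follows: compute_output evaluates y_i times the current score for every
 * training string, add_new_cut accumulates a new cutting plane (subgradient) from the
 * violating examples, compute_W rebuilds w from the stored planes and the inner-QP
 * solution alpha, update_W applies the line-search step between the old and new w, and
 * sort is a small sorting helper used internally by the solver.
 */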
00259 
00260 /*----------------------------------------------------------------------------------
00261   sq_norm_W = sparse_update_W( t ) does the following:
00262 
00263   W = oldW*(1-t) + t*W;
00264   sq_norm_W = W'*W;
00265 
00266   ---------------------------------------------------------------------------------*/
00267 float64_t CWDSVMOcas::update_W( float64_t t, void* ptr )
00268 {
00269   float64_t sq_norm_W = 0;
00270   CWDSVMOcas* o = (CWDSVMOcas*) ptr;
00271   uint32_t nDim = (uint32_t) o->w_dim;
00272   float32_t* W=o->w;
00273   float32_t* oldW=o->old_w;
00274   float64_t bias=o->bias;
00275   float64_t old_bias=o->old_bias;
00276 
00277   for(uint32_t j=0; j <nDim; j++)
00278   {
00279       W[j] = oldW[j]*(1-t) + t*W[j];
00280       sq_norm_W += W[j]*W[j];
00281   }
00282 
00283   bias=old_bias*(1-t) + t*bias;
00284   sq_norm_W += CMath::sq(bias);
00285 
00286   o->bias=bias;
00287   o->old_bias=old_bias;
00288 
00289   return( sq_norm_W );
00290 }
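
/* Interpretation (not original text): t is the step chosen by the solver's line search
 * along the segment between the previous best solution and the new cutting-plane
 * solution,
 *
 *     w(t) = (1-t)*w_old + t*w,   t in [0, 1],
 *
 * applied to the bias as well; the returned value ||w(t)||^2 + bias(t)^2 is presumably
 * needed by the solver to evaluate the quadratic term of the objective at the chosen t.
 */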
00291 
00292 /*----------------------------------------------------------------------------------
00293   sparse_add_new_cut( new_col_H, new_cut, cut_length, nSel ) does the following:
00294 
00295     new_a = sum(data_X(:,find(new_cut ~=0 )),2);
00296     new_col_H = [sparse_A(:,1:nSel)'*new_a ; new_a'*new_a];
00297     sparse_A(:,nSel+1) = new_a;
00298 
00299   ---------------------------------------------------------------------------------*/
00300 void* CWDSVMOcas::add_new_cut_helper( void* ptr)
00301 {
00302     wdocas_thread_params_add* p = (wdocas_thread_params_add*) ptr;
00303     CWDSVMOcas* o = p->wdocas;
00304     int32_t start = p->start;
00305     int32_t end = p->end;
00306     int32_t string_length = o->string_length;
00307     //uint32_t nDim=(uint32_t) o->w_dim;
00308     uint32_t cut_length=p->cut_length;
00309     uint32_t* new_cut=p->new_cut;
00310     int32_t* w_offsets = o->w_offsets;
00311     float64_t* y = o->lab;
00312     int32_t alphabet_size = o->alphabet_size;
00313     float32_t* wd_weights = o->wd_weights;
00314     int32_t degree = o->degree;
00315     CStringFeatures<uint8_t>* f = o->features;
00316     float64_t normalization_const = o->normalization_const;
00317 
00318     // temporary vector
00319     float32_t* new_a = p->new_a;
00320     //float32_t* new_a = SG_MALLOC(float32_t, nDim);
00321     //memset(new_a, 0, sizeof(float32_t)*nDim);
00322 
00323     int32_t* val=SG_MALLOC(int32_t, cut_length);
00324     for (int32_t j=start; j<end; j++)
00325     {
00326         int32_t offs=o->w_dim_single_char*j;
00327         memset(val,0,sizeof(int32_t)*cut_length);
00328         int32_t lim=CMath::min(degree, string_length-j);
00329         int32_t len;
00330 
00331         for (int32_t k=0; k<lim; k++)
00332         {
00333             bool free_vec;
00334             uint8_t* vec = f->get_feature_vector(j+k, len, free_vec);
00335             float32_t wd = wd_weights[k]/normalization_const;
00336 
00337             for(uint32_t i=0; i < cut_length; i++)
00338             {
00339                 val[i]=val[i]*alphabet_size + vec[new_cut[i]];
00340                 new_a[offs+val[i]]+=wd * y[new_cut[i]];
00341             }
00342             offs+=w_offsets[k];
00343             f->free_feature_vector(vec, j+k, free_vec);
00344         }
00345     }
00346 
00347     //p->new_a=new_a;
00348     SG_FREE(val);
00349     return NULL;
00350 }
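
/* How the indexing above works (an illustrative note, not original text): RAWDNA symbols
 * are already stored as values 0..alphabet_size-1, and the string features appear to be
 * laid out transposed, so get_feature_vector(j+k) returns the characters at position j+k
 * of all training strings and is indexed by the example id new_cut[i]. The update
 * val[i] = val[i]*alphabet_size + vec[new_cut[i]] therefore builds the base-|alphabet|
 * index of the (k+1)-mer starting at position j; e.g. with alphabet_size=4 a 2-mer (a,b)
 * at position j lands at
 *
 *     j*w_dim_single_char + w_offsets[0] + (a*4 + b),
 *
 * and new_a accumulates y_i * wd_weights[k] / normalization_const there, i.e. the sum of
 * weighted degree feature vectors of the selected examples.
 */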
00351 
00352 int CWDSVMOcas::add_new_cut(
00353     float64_t *new_col_H, uint32_t *new_cut, uint32_t cut_length,
00354     uint32_t nSel, void* ptr)
00355 {
00356     CWDSVMOcas* o = (CWDSVMOcas*) ptr;
00357     int32_t string_length = o->string_length;
00358     uint32_t nDim=(uint32_t) o->w_dim;
00359     float32_t** cuts=o->cuts;
00360     float64_t* c_bias = o->cp_bias;
00361 
00362     uint32_t i;
00363     wdocas_thread_params_add* params_add=SG_MALLOC(wdocas_thread_params_add, o->parallel->get_num_threads());
00364     pthread_t* threads=SG_MALLOC(pthread_t, o->parallel->get_num_threads());
00365     float32_t* new_a=SG_MALLOC(float32_t, nDim);
00366     memset(new_a, 0, sizeof(float32_t)*nDim);
00367 
00368     int32_t t;
00369     int32_t nthreads=o->parallel->get_num_threads()-1;
00370     int32_t step= string_length/o->parallel->get_num_threads();
00371 
00372     if (step<1)
00373     {
00374         nthreads=string_length-1;
00375         step=1;
00376     }
00377 
00378 #ifdef HAVE_PTHREAD
00379     for (t=0; t<nthreads; t++)
00380     {
00381         params_add[t].wdocas=o;
00382         //params_add[t].new_a=NULL;
00383         params_add[t].new_a=new_a;
00384         params_add[t].new_cut=new_cut;
00385         params_add[t].start = step*t;
00386         params_add[t].end = step*(t+1);
00387         params_add[t].cut_length = cut_length;
00388 
00389         if (pthread_create(&threads[t], NULL, &CWDSVMOcas::add_new_cut_helper, (void*)&params_add[t]) != 0)
00390         {
00391             nthreads=t;
00392             SG_SWARNING("thread creation failed\n");
00393             break;
00394         }
00395     }
00396 
00397     params_add[t].wdocas=o;
00398     //params_add[t].new_a=NULL;
00399     params_add[t].new_a=new_a;
00400     params_add[t].new_cut=new_cut;
00401     params_add[t].start = step*t;
00402     params_add[t].end = string_length;
00403     params_add[t].cut_length = cut_length;
00404     add_new_cut_helper(&params_add[t]);
00405     //float32_t* new_a=params_add[t].new_a;
00406 
00407     for (t=0; t<nthreads; t++)
00408     {
00409         if (pthread_join(threads[t], NULL) != 0)
00410             SG_SWARNING( "pthread_join failed\n");
00411 
00412         //float32_t* a=params_add[t].new_a;
00413         //for (i=0; i<nDim; i++)
00414         //  new_a[i]+=a[i];
00415         //SG_FREE(a);
00416     }
00417 #endif /* HAVE_PTHREAD */
00418     for(i=0; i < cut_length; i++)
00419     {
00420         if (o->use_bias)
00421             c_bias[nSel]+=o->lab[new_cut[i]];
00422     }
00423 
00424     // insert new_a into the last column of sparse_A
00425     for(i=0; i < nSel; i++)
00426         new_col_H[i] = SGVector<float32_t>::dot(new_a, cuts[i], nDim) + c_bias[nSel]*c_bias[i];
00427     new_col_H[nSel] = SGVector<float32_t>::dot(new_a, new_a, nDim) + CMath::sq(c_bias[nSel]);
00428 
00429     cuts[nSel]=new_a;
00430     //CMath::display_vector(new_col_H, nSel+1, "new_col_H");
00431     //CMath::display_vector(cuts[nSel], nDim, "cut[nSel]");
00432     //
00433     SG_FREE(threads);
00434     SG_FREE(params_add);
00435 
00436     return 0;
00437 }
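
/* Bookkeeping note (a sketch of the usual OCAS update, not text from libocas): with
 * a_k = cuts[k] the k-th stored cutting plane and b_k = cp_bias[k] its bias part, the
 * code above fills the new column of the cutting-plane Gram matrix,
 *
 *     new_col_H[i]    = <a_nSel, a_i> + b_nSel*b_i      for i < nSel,
 *     new_col_H[nSel] = <a_nSel, a_nSel> + b_nSel^2,
 *
 * which the inner QP solver uses to re-optimize the multipliers alpha over all planes.
 */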
00438 
00439 int CWDSVMOcas::sort( float64_t* vals, float64_t* data, uint32_t size)
00440 {
00441     CMath::qsort_index(vals, data, size);
00442     return 0;
00443 }
00444 
00445 /*----------------------------------------------------------------------
00446   sparse_compute_output( output ) does the following:
00447 
00448   output = data_X'*W;
00449   ----------------------------------------------------------------------*/
00450 void* CWDSVMOcas::compute_output_helper(void* ptr)
00451 {
00452     wdocas_thread_params_output* p = (wdocas_thread_params_output*) ptr;
00453     CWDSVMOcas* o = p->wdocas;
00454     int32_t start = p->start;
00455     int32_t end = p->end;
00456     float32_t* out = p->out;
00457     float64_t* output = p->output;
00458     int32_t* val = p->val;
00459 
00460     CStringFeatures<uint8_t>* f=o->get_features();
00461 
00462     int32_t degree = o->degree;
00463     int32_t string_length = o->string_length;
00464     int32_t alphabet_size = o->alphabet_size;
00465     int32_t* w_offsets = o->w_offsets;
00466     float32_t* wd_weights = o->wd_weights;
00467     float32_t* w= o->w;
00468 
00469     float64_t* y = o->lab;
00470     float64_t normalization_const = o->normalization_const;
00471 
00472 
00473     for (int32_t j=0; j<string_length; j++)
00474     {
00475         int32_t offs=o->w_dim_single_char*j;
00476         for (int32_t i=start ; i<end; i++)
00477             val[i]=0;
00478 
00479         int32_t lim=CMath::min(degree, string_length-j);
00480         int32_t len;
00481 
00482         for (int32_t k=0; k<lim; k++)
00483         {
00484             bool free_vec;
00485             uint8_t* vec=f->get_feature_vector(j+k, len, free_vec);
00486             float32_t wd = wd_weights[k];
00487 
00488             for (int32_t i=start; i<end; i++) // quite fast 1.9s
00489             {
00490                 val[i]=val[i]*alphabet_size + vec[i];
00491                 out[i]+=wd*w[offs+val[i]];
00492             }
00493 
00494             /*for (int32_t i=0; i<nData/4; i++) // slowest 2s
00495             {
00496                 uint32_t x=((uint32_t*) vec)[i];
00497                 int32_t ii=4*i;
00498                 val[ii]=val[ii]*alphabet_size + (x&255);
00499                 val[ii+1]=val[ii+1]*alphabet_size + ((x>>8)&255);
00500                 val[ii+2]=val[ii+2]*alphabet_size + ((x>>16)&255);
00501                 val[ii+3]=val[ii+3]*alphabet_size + (x>>24);
00502                 out[ii]+=wd*w[offs+val[ii]];
00503                 out[ii+1]+=wd*w[offs+val[ii+1]];
00504                 out[ii+2]+=wd*w[offs+val[ii+2]];
00505                 out[ii+3]+=wd*w[offs+val[ii+3]];
00506             }*/
00507 
00508             /*for (int32_t i=0; i<nData>>3; i++) // fastest on 64bit: 1.5s
00509             {
00510                 uint64_t x=((uint64_t*) vec)[i];
00511                 int32_t ii=i<<3;
00512                 val[ii]=val[ii]*alphabet_size + (x&255);
00513                 val[ii+1]=val[ii+1]*alphabet_size + ((x>>8)&255);
00514                 val[ii+2]=val[ii+2]*alphabet_size + ((x>>16)&255);
00515                 val[ii+3]=val[ii+3]*alphabet_size + ((x>>24)&255);
00516                 val[ii+4]=val[ii+4]*alphabet_size + ((x>>32)&255);
00517                 val[ii+5]=val[ii+5]*alphabet_size + ((x>>40)&255);
00518                 val[ii+6]=val[ii+6]*alphabet_size + ((x>>48)&255);
00519                 val[ii+7]=val[ii+7]*alphabet_size + (x>>56);
00520                 out[ii]+=wd*w[offs+val[ii]];
00521                 out[ii+1]+=wd*w[offs+val[ii+1]];
00522                 out[ii+2]+=wd*w[offs+val[ii+2]];
00523                 out[ii+3]+=wd*w[offs+val[ii+3]];
00524                 out[ii+4]+=wd*w[offs+val[ii+4]];
00525                 out[ii+5]+=wd*w[offs+val[ii+5]];
00526                 out[ii+6]+=wd*w[offs+val[ii+6]];
00527                 out[ii+7]+=wd*w[offs+val[ii+7]];
00528             }*/
00529             offs+=w_offsets[k];
00530             f->free_feature_vector(vec, j+k, free_vec);
00531         }
00532     }
00533 
00534     for (int32_t i=start; i<end; i++)
00535         output[i]=y[i]*o->bias + out[i]*y[i]/normalization_const;
00536 
00537     //CMath::display_vector(o->w, o->w_dim, "w");
00538     //CMath::display_vector(output, nData, "out");
00539     return NULL;
00540 }
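
/* In formula form (an interpretation, not original text), the final loop computes
 *
 *     output[i] = y_i * ( bias + <w, phi(x_i)> / normalization_const ),
 *
 * with <w, phi(x_i)> accumulated in out[i] position by position using the same
 * base-|alphabet| k-mer indexing as add_new_cut_helper(). The commented-out loop
 * variants kept inside the function are alternative unrollings retained for speed
 * comparison (their rough timings are noted in the comments).
 */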
00541 
00542 int CWDSVMOcas::compute_output( float64_t *output, void* ptr )
00543 {
00544     CWDSVMOcas* o = (CWDSVMOcas*) ptr;
00545     int32_t nData=o->num_vec;
00546     wdocas_thread_params_output* params_output=SG_MALLOC(wdocas_thread_params_output, o->parallel->get_num_threads());
00547     pthread_t* threads = SG_MALLOC(pthread_t, o->parallel->get_num_threads());
00548 
00549     float32_t* out=SG_MALLOC(float32_t, nData);
00550     int32_t* val=SG_MALLOC(int32_t, nData);
00551     memset(out, 0, sizeof(float32_t)*nData);
00552 
00553     int32_t t;
00554     int32_t nthreads=o->parallel->get_num_threads()-1;
00555     int32_t step= nData/o->parallel->get_num_threads();
00556 
00557     if (step<1)
00558     {
00559         nthreads=nData-1;
00560         step=1;
00561     }
00562 #ifdef HAVE_PTHREAD
00563     for (t=0; t<nthreads; t++)
00564     {
00565         params_output[t].wdocas=o;
00566         params_output[t].output=output;
00567         params_output[t].out=out;
00568         params_output[t].val=val;
00569         params_output[t].start = step*t;
00570         params_output[t].end = step*(t+1);
00571 
00572         //SG_SPRINT("t=%d start=%d end=%d output=%p\n", t, params_output[t].start, params_output[t].end, params_output[t].output);
00573         if (pthread_create(&threads[t], NULL, &CWDSVMOcas::compute_output_helper, (void*)&params_output[t]) != 0)
00574         {
00575             nthreads=t;
00576             SG_SWARNING("thread creation failed\n");
00577             break;
00578         }
00579     }
00580 
00581     params_output[t].wdocas=o;
00582     params_output[t].output=output;
00583     params_output[t].out=out;
00584     params_output[t].val=val;
00585     params_output[t].start = step*t;
00586     params_output[t].end = nData;
00587     compute_output_helper(&params_output[t]);
00588     //SG_SPRINT("t=%d start=%d end=%d output=%p\n", t, params_output[t].start, params_output[t].end, params_output[t].output);
00589 
00590     for (t=0; t<nthreads; t++)
00591     {
00592         if (pthread_join(threads[t], NULL) != 0)
00593             SG_SWARNING( "pthread_join failed\n");
00594     }
00595     SG_FREE(threads);
00596     SG_FREE(params_output);
00597     SG_FREE(val);
00598     SG_FREE(out);
00599 #endif /* HAVE_PTHREAD */
00600     return 0;
00601 }
00602 /*----------------------------------------------------------------------
00603   sq_norm_W = compute_W( alpha, nSel ) does the following:
00604 
00605   oldW = W;
00606   W = sparse_A(:,1:nSel)'*alpha;
00607   sq_norm_W = W'*W;
00608   dp_WoldW = W'*oldW';
00609 
00610   ----------------------------------------------------------------------*/
00611 void CWDSVMOcas::compute_W(
00612     float64_t *sq_norm_W, float64_t *dp_WoldW, float64_t *alpha, uint32_t nSel,
00613     void* ptr)
00614 {
00615     CWDSVMOcas* o = (CWDSVMOcas*) ptr;
00616     uint32_t nDim= (uint32_t) o->w_dim;
00617     CMath::swap(o->w, o->old_w);
00618     float32_t* W=o->w;
00619     float32_t* oldW=o->old_w;
00620     float32_t** cuts=o->cuts;
00621     memset(W, 0, sizeof(float32_t)*nDim);
00622     float64_t* c_bias = o->cp_bias;
00623     float64_t old_bias=o->bias;
00624     float64_t bias=0;
00625 
00626     for (uint32_t i=0; i<nSel; i++)
00627     {
00628         if (alpha[i] > 0)
00629             SGVector<float32_t>::vec1_plus_scalar_times_vec2(W, (float32_t) alpha[i], cuts[i], nDim);
00630 
00631         bias += c_bias[i]*alpha[i];
00632     }
00633 
00634     *sq_norm_W = SGVector<float32_t>::dot(W,W, nDim) +CMath::sq(bias);
00635     *dp_WoldW = SGVector<float32_t>::dot(W,oldW, nDim) + bias*old_bias;
00636     //SG_PRINT("nSel=%d sq_norm_W=%f dp_WoldW=%f\n", nSel, *sq_norm_W, *dp_WoldW);
00637 
00638     o->bias = bias;
00639     o->old_bias = old_bias;
00640 }
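
/* In formula form (a sketch matching the header comment above): with a_i = cuts[i] and
 * b_i = cp_bias[i],
 *
 *     w_new     = sum_{i<nSel} alpha[i]*a_i,        bias_new = sum_{i<nSel} alpha[i]*b_i,
 *     sq_norm_W = <w_new, w_new> + bias_new^2,
 *     dp_WoldW  = <w_new, w_old> + bias_new*bias_old,
 *
 * and swapping w with old_w before the rebuild keeps the previous iterate around for the
 * line search in update_W().
 */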