52         SG_ERROR(
"void glpkwrapper::setup(const int32_tnumkernels): input " 
   62       SG_ERROR(
"MKLMulticlassGradient::set_mkl_norm(float64_t norm) : parameter pnorm<1")
 
   78     double pi4=3.151265358979238/2;
 
   88         for(int32_t k=0; k< i+1 ;++k)
 
   90             weights[k]*=cos( std::min(
std::max(0.0,gammas[i]),pi4) );
 
   92         weights[i+1]=sin( std::min(
std::max(0.0,gammas[i]),pi4) );
 
   99             weights[i]=pow(weights[i],2.0/
pnorm);
 
  108     double pi4=3.151265358979238/2;
 
  111     std::fill(gammagradient.begin(),gammagradient.end(),0.0);
 
  120             for(int32_t k=0; k< std::min(i+1,dim+2) ;++k)
 
  122                 gammagradient[k]*=pow( cos( std::min(
std::max(0.0,gammas[i]),pi4) ), 2.0/
pnorm) ;
 
  125                 gammagradient[i+1]=pow( sin( std::min(
std::max(0.0,gammas[i]),pi4) ),2.0/
pnorm);
 
  130             for(int32_t k=0; k< i+1 ;++k)
 
  132                 gammagradient[k]*= pow( cos( std::min(
std::max(0.0,gammas[i]),pi4) ), 2.0/
pnorm-1.0)*(-1)*sin( std::min(
std::max(0.0,gammas[i]),pi4) );
 
  134             gammagradient[i+1]=pow( sin( std::min(
std::max(0.0,gammas[i]),pi4) ),2.0/
pnorm-1)*cos( std::min(
std::max(0.0,gammas[i]),pi4) );
 
  161     int32_t maxhalfiter=20;
 
  162     int32_t totaliters=6;
 
  165     std::vector<float64_t> finalgamma,curgamma;
 
  168     if(oldweights.empty())
 
  170     std::fill(curgamma.begin(),curgamma.end(),pi4/2);
 
  178          tmpbeta[i]=pow(oldweights[i],
pnorm/2);
 
  183             curgamma[i-1]=asin(tmpbeta[i]);
 
  187                 if( cos(curgamma[i])<=0)
 
  189                SG_SINFO(
"linesearch(...): at i %d cos(curgamma[i-1])<=0 %f\n",i, cos(curgamma[i-1]))
 
  196                 if(cos(curgamma[i-1])>0)
 
  198                     tmpbeta[k]/=cos(curgamma[i-1]);
 
  201                   SG_SINFO(
"linesearch(...): at k %d tmpbeta[k]>1 %f\n",k, tmpbeta[k])
 
  203                     tmpbeta[k]=std::min(1.0,
std::max(0.0, tmpbeta[k]));
 
  209                 for(
size_t i=0;i<curgamma.size();++i)
 
  211         SG_SINFO(
"linesearch(...): curgamma[i] %f\n",curgamma[i])
 
  220         std::vector<float64_t> curbeta;
 
  225         SG_SINFO(
"linesearch(...): objectives at i %f\n",minval)
 
  229         SG_SINFO(
"linesearch(...): objectives at i %f\n",tmpval)
 
  238         std::vector<float64_t> curgrad;
 
  241             ::std::vector<float64_t> gammagradient;
 
  243             curgrad.push_back(
objectives(gammagradient, minind));
 
  246         std::vector<float64_t> maxalphas(numkernels-1,0);
 
  248         for(int32_t i=0; i< numkernels-1 ;++i)
 
  250             maxgrad=
std::max(maxgrad,fabs(curgrad[i]) );
 
  253                 maxalphas[i]=(0-curgamma[i])/curgrad[i];
 
  255             else if(curgrad[i]>0)
 
  257                 maxalphas[i]=(pi4-curgamma[i])/curgrad[i];
 
  261                 maxalphas[i]=1024*1024;
 
  266         for(int32_t i=1; i< numkernels-1 ;++i)
 
  268             maxalpha=std::min(maxalpha,maxalphas[i]);
 
  271         if((maxalpha>1024*1023)|| (maxgrad<fingrad))
 
  281             float64_t leftalpha=0, rightalpha=maxalpha, midalpha=(leftalpha+rightalpha)/2;
 
  283             std::vector<float64_t> tmpgamma=curgamma, tmpbeta;
 
  284             for(int32_t i=1; i< numkernels-1 ;++i)
 
  286                 tmpgamma[i]=tmpgamma[i]+rightalpha*curgrad[i];
 
  292                 curobj=std::min(curobj,
objectives(tmpbeta, i));
 
  296             while((curobj < minval)&&(curhalfiter<maxhalfiter)&&(fabs(curobj/minval-1 ) > maxrelobjdiff ))
 
  299                 midalpha=(leftalpha+rightalpha)/2;
 
  302                 for(int32_t i=1; i< numkernels-1 ;++i)
 
  304                     tmpgamma[i]=tmpgamma[i]+rightalpha*curgrad[i];
 
  310                     curobj=std::min(curobj,
objectives(tmpbeta, i));
 
  322                 for(int32_t i=1; i< numkernels-1 ;++i)
 
  324                     tmpgamma[i]=tmpgamma[i]+midalpha*curgrad[i];
 
  330                     curobj=std::min(curobj,
objectives(tmpbeta, i));
 
  343                 midalpha=(leftalpha+rightalpha)/2;
 
  346             while(  fabs(curobj/tmpobj-1 ) > maxrelobjdiff  );
 
  351         if(longiters>= totaliters)
 
  360         nor+=pow(finalbeta[i],
pnorm);
 
  364         nor=pow(nor,1.0/
pnorm);
 
  378 int32_t num_kernels=(int)oldweights.size();
 
  379 int32_t nofKernelsGood=num_kernels;
 
  381 finalbeta=oldweights;
 
  383     for( int32_t p=0; p<num_kernels; ++p )
 
  386         if(  oldweights[p] >= 0.0 )
 
  396         ASSERT( finalbeta[p] >= 0 )
 
  401     for( int32_t p=0; p<num_kernels; ++p )
 
  406     for( int32_t p=0; p<num_kernels; ++p )
 
  411     for( int32_t p=0; p<num_kernels; ++p )
 
  412         preR += 
CMath::pow( oldweights[p] - finalbeta[p], 2.0 );
 
  418         SG_PRINT(
"MKL-direct: nofKernelsGood = %d\n", nofKernelsGood )
 
  419         SG_PRINT(
"MKL-direct: Z = %e\n", Z )
 
  420         SG_PRINT(
"MKL-direct: eps = %e\n", epsRegul )
 
  421         for( int32_t p=0; p<num_kernels; ++p )
 
  424             SG_PRINT(
"MKL-direct: t[%3d] = %e  ( diff = %e = %e - %e )\n", p, t, oldweights[p]-finalbeta[p], oldweights[p], finalbeta[p] )
 
  426         SG_PRINT(
"MKL-direct: preR = %e\n", preR )
 
  429         SG_PRINT(
"MKL-direct: R = %e\n", R )
 
  430         SG_ERROR(
"Assertion R >= 0 failed!\n" )
 
  434     for( int32_t p=0; p<num_kernels; ++p )
 
  438         ASSERT( finalbeta[p] >= 0 )
 
  442     for( int32_t p=0; p<num_kernels; ++p )
 
  445         ASSERT( finalbeta[p] >= 0.0 )
 
  446         if( finalbeta[p] > 1.0 )
 
  454         SG_ERROR(
"MKLMulticlassGradient::computeweights(std::vector<float64_t> & weights2) : parameter pnorm<1")
 
  456     SG_SDEBUG(
"MKLMulticlassGradient::computeweights(...): pnorm %f\n",
pnorm)
 
  458     std::vector<float64_t> initw(weights2);
 
  461     SG_SINFO(
"MKLMulticlassGradient::computeweights(...): newweights \n")
 
  462     for(
size_t i=0;i<weights2.size();++i)
 
virtual void addconstraint(const ::std::vector< float64_t > &normw2, const float64_t sumofpositivealphas)
void linesearch(std::vector< float64_t > &finalbeta, const std::vector< float64_t > &oldweights)
::std::vector< ::std::vector< float64_t > > normsofsubkernels
virtual void computeweights(std::vector< float64_t > &weights2)
void genbetas(::std::vector< float64_t > &weights, const ::std::vector< float64_t > &gammas)
void gengammagradient(::std::vector< float64_t > &gammagradient, const ::std::vector< float64_t > &gammas, const int32_t dim)
virtual void set_mkl_norm(float64_t norm)
virtual void setup(const int32_t numkernels2)
float64_t objectives(const ::std::vector< float64_t > &weights, const int32_t index)
All classes and functions are contained in the shogun namespace.
MKLMulticlassGradient operator=(MKLMulticlassGradient &gl)
Matrix::Scalar max(Matrix m)
virtual ~MKLMulticlassGradient()
::std::vector< float64_t > sumsofalphas
static float32_t sqrt(float32_t x)
void linesearch2(std::vector< float64_t > &finalbeta, const std::vector< float64_t > &oldweights)
static int32_t pow(bool x, int32_t n)
MKLMulticlassGradient is a helper class for MKLMulticlass.