/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2009 Alexander Binder
 * Copyright (C) 2009 Fraunhofer Institute FIRST and Max-Planck-Society
 */

#ifndef MKLMULTICLASSGRADIENT_H_
#define MKLMULTICLASSGRADIENT_H_

#include <vector>
#include <cmath>
#include <cassert>
#include "base/SGObject.h"
#include "classifier/mkl/MKLMultiClassOptimizationBase.h"


namespace shogun
{
/** @brief MKLMultiClassGradient is a gradient-based solver for the kernel-weight
 * subproblem of multiclass multiple kernel learning (MKL).
 *
 * It derives from MKLMultiClassOptimizationBase and is driven by repeatedly
 * adding constraints (one per intermediate SVM solution) and recomputing the
 * kernel weights under an L_p-norm constraint (member #pnorm).
 *
 * NOTE(review): the copy constructor and operator= take a non-const reference
 * and operator= returns by value; the declarations are kept as-is because the
 * matching definitions live in the corresponding .cpp file.
 */
class MKLMultiClassGradient: public MKLMultiClassOptimizationBase
{
public:
	/** Default constructor. */
	MKLMultiClassGradient();

	/** Destructor. */
	virtual ~MKLMultiClassGradient();

	/** Copy constructor.
	 *
	 * @param gl instance to copy from (non-const by legacy convention)
	 */
	MKLMultiClassGradient(MKLMultiClassGradient & gl);

	/** Assignment operator (returns by value by legacy convention).
	 *
	 * @param gl instance to assign from
	 */
	MKLMultiClassGradient operator=(MKLMultiClassGradient & gl);

	/** Initializes the solver for a given number of kernels.
	 *
	 * Presumably stores the value in member #numkernels — confirm in the
	 * .cpp definition.
	 *
	 * @param numkernels2 number of sub-kernels to optimize weights over
	 */
	virtual void setup(const int32_t numkernels2);

	/** Adds one constraint derived from an intermediate SVM solution.
	 *
	 * Presumably appends to members #normsofsubkernels and #sumsofalphas —
	 * confirm in the .cpp definition.
	 *
	 * @param normw2 squared norms of the SVM solution per sub-kernel
	 * @param sumofpositivealphas bias-independent part of the SVM objective
	 */
	virtual void addconstraint(const ::std::vector<float64_t> & normw2,
			const float64_t sumofpositivealphas);

	/** Solves for the current kernel weights given all added constraints.
	 *
	 * @param weights2 output: computed sub-kernel weights
	 */
	virtual void computeweights(std::vector<float64_t> & weights2);

	/** @return object name ("MKLMultiClassGradient") */
	inline virtual const char* get_name() const
	{
		return "MKLMultiClassGradient";
	}

	/** Sets the p of the L_p-norm constraint on the kernel weights
	 * (stored in member #pnorm).
	 *
	 * @param norm the norm value p
	 */
	virtual void set_mkl_norm(float64_t norm);

protected:
	/** Helper: computes kernel weights from a gamma parametrization.
	 *
	 * @param weights output: generated weights
	 * @param gammas input parametrization
	 */
	void genbetas( ::std::vector<float64_t> & weights ,const ::std::vector<float64_t> & gammas);

	/** Helper: computes the gradient of the weights w.r.t. one gamma
	 * coordinate.
	 *
	 * @param gammagradient output: gradient vector
	 * @param gammas input parametrization
	 * @param dim coordinate index the gradient is taken with respect to
	 */
	void gengammagradient( ::std::vector<float64_t> & gammagradient ,const ::std::vector<float64_t> & gammas,const int32_t dim);

	/** Helper: evaluates the objective for given weights against the
	 * constraint with the given index.
	 *
	 * @param weights candidate kernel weights
	 * @param index index of the stored constraint to evaluate against
	 * @return objective value
	 */
	float64_t objectives(const ::std::vector<float64_t> & weights, const int32_t index);

	/** Helper: line search producing the next weight iterate from the old
	 * one.
	 *
	 * @param finalbeta output: weights after the line search
	 * @param oldweights previous weight iterate
	 */
	void linesearch(std::vector<float64_t> & finalbeta,const std::vector<float64_t> & oldweights);

protected:
	/** Number of sub-kernels (set via setup()). */
	int32_t numkernels;

	/** Per-constraint squared norms of SVM solutions, one inner vector per
	 * added constraint (filled via addconstraint()). */
	::std::vector< ::std::vector<float64_t> > 
normsofsubkernels;

	/** Per-constraint bias-independent objective parts (filled via
	 * addconstraint()). */
	::std::vector< float64_t > sumsofalphas ;

	/** The p of the L_p-norm constraint on the kernel weights. */
	float64_t pnorm;
};
}

#endif