SHOGUN  4.1.0
GaussianARDKernel.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2015 Wu Lin
 * Written (W) 2012 Jacob Walker
 *
 * Adapted from WeightedDegreeRBFKernel.cpp
 */

#include <shogun/kernel/GaussianARDKernel.h>
#include <shogun/mathematics/Math.h>

#ifdef HAVE_LINALG_LIB
#include <shogun/mathematics/linalg/linalg.h>
#endif

using namespace shogun;

CGaussianARDKernel::CGaussianARDKernel() : CLinearARDKernel()
{
    initialize();
}

CGaussianARDKernel::~CGaussianARDKernel()
{
}

void CGaussianARDKernel::initialize()
{
    set_width(1.0);
    SG_ADD(&m_width, "width", "Kernel width", MS_AVAILABLE, GRADIENT_AVAILABLE);
}

#ifdef HAVE_LINALG_LIB
CGaussianARDKernel::CGaussianARDKernel(int32_t size, float64_t width)
    : CLinearARDKernel(size)
{
    initialize();
    set_width(width);
}

CGaussianARDKernel::CGaussianARDKernel(CDotFeatures* l,
    CDotFeatures* r, int32_t size, float64_t width)
    : CLinearARDKernel(size)
{
    initialize();
    set_width(width);
}

bool CGaussianARDKernel::init(CFeatures* l, CFeatures* r)
{
    return CLinearARDKernel::init(l,r);
}

CGaussianARDKernel* CGaussianARDKernel::obtain_from_generic(CKernel* kernel)
{
    if (kernel->get_kernel_type()!=K_GAUSSIANARD)
    {
        SG_SERROR("Provided kernel is not of type CGaussianARDKernel!\n");
    }

    /* since an additional reference is returned */
    SG_REF(kernel);
    return (CGaussianARDKernel*)kernel;
}

float64_t CGaussianARDKernel::compute(int32_t idx_a, int32_t idx_b)
{
    float64_t result=distance(idx_a,idx_b);
    return CMath::exp(-result);
}
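
/* Note (added for clarity, not in the original source): together with
 * distance() further below, compute() evaluates the Gaussian ARD kernel as
 * k(x,y) = exp(-d(x,y)), where d(x,y) is the ARD-weighted squared distance
 * between x and y divided by m_width; the exact weighting is applied by
 * compute_helper() in the CLinearARDKernel base class. */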

SGVector<float64_t> CGaussianARDKernel::get_parameter_gradient_diagonal(
    const TParameter* param, index_t index)
{
    REQUIRE(param, "Param not set\n");
    REQUIRE(lhs, "Left features not set!\n");
    REQUIRE(rhs, "Right features not set!\n");

    if (lhs==rhs)
    {
        if (!strcmp(param->m_name, "weights") || !strcmp(param->m_name, "width"))
        {
            SGVector<float64_t> derivative(num_lhs);
            derivative.zero();
            return derivative;
        }
    }
    else
    {
        int32_t length=CMath::min(num_lhs, num_rhs);
        SGVector<float64_t> derivative(length);

        for (index_t j=0; j<length; j++)
        {
            if (!strcmp(param->m_name, "weights"))
            {
                check_weight_gradient_index(index);
                SGVector<float64_t> avec=get_feature_vector(j, lhs);
                SGVector<float64_t> bvec=get_feature_vector(j, rhs);
                derivative[j]=get_parameter_gradient_helper(param,index,j,j,avec,bvec);
            }
            else if (!strcmp(param->m_name, "width"))
            {
                SGVector<float64_t> avec, bvec;
                derivative[j]=get_parameter_gradient_helper(param,index,j,j,avec,bvec);
            }
        }
        return derivative;
    }

    SG_ERROR("Can't compute derivative wrt %s parameter\n", param->m_name);
    return SGVector<float64_t>();
}

float64_t CGaussianARDKernel::get_parameter_gradient_helper(
    const TParameter* param, index_t index, int32_t idx_a,
    int32_t idx_b, SGVector<float64_t> avec, SGVector<float64_t> bvec)
{
    REQUIRE(param, "Param not set\n");

    if (!strcmp(param->m_name, "weights"))
    {
        bvec=linalg::add(avec, bvec, 1.0, -1.0);
        float64_t scale=-kernel(idx_a,idx_b)/m_width;
        return compute_gradient_helper(bvec, bvec, scale, index);
    }
    else if (!strcmp(param->m_name, "width"))
    {
        float64_t tmp=kernel(idx_a,idx_b);
        if (tmp<=CMath::MACHINE_EPSILON)
            return 0.0;
        return -tmp*CMath::log(tmp)/m_width;
    }
    else
    {
        SG_ERROR("Can't compute derivative wrt %s parameter\n", param->m_name);
        return 0.0;
    }
}
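
/* Note (added for clarity, not in the original source): the "width" branch
 * above follows from k = exp(-r/m_width), with r the ARD-weighted squared
 * distance, so dk/dm_width = k*r/m_width^2 = -k*log(k)/m_width, which is
 * exactly the value returned; the MACHINE_EPSILON guard avoids taking the
 * logarithm of a kernel value that is numerically zero. */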

SGMatrix<float64_t> CGaussianARDKernel::get_parameter_gradient(
    const TParameter* param, index_t index)
{
    REQUIRE(param, "Param not set\n");
    REQUIRE(lhs, "Left features not set!\n");
    REQUIRE(rhs, "Right features not set!\n");

    if (!strcmp(param->m_name, "weights"))
    {
        SGMatrix<float64_t> derivative(num_lhs, num_rhs);
        for (index_t j=0; j<num_lhs; j++)
        {
            SGVector<float64_t> avec=get_feature_vector(j, lhs);
            for (index_t k=0; k<num_rhs; k++)
            {
                SGVector<float64_t> bvec=get_feature_vector(k, rhs);
                derivative(j,k)=get_parameter_gradient_helper(param,index,j,k,avec,bvec);
            }
        }
        return derivative;
    }
    else if (!strcmp(param->m_name, "width"))
    {
        SGMatrix<float64_t> derivative(num_lhs, num_rhs);

        for (index_t j=0; j<num_lhs; j++)
        {
            for (index_t k=0; k<num_rhs; k++)
            {
                SGVector<float64_t> avec, bvec;
                derivative(j,k)=get_parameter_gradient_helper(param,index,j,k,avec,bvec);
            }
        }
        return derivative;
    }
    else
    {
        SG_ERROR("Can't compute derivative wrt %s parameter\n", param->m_name);
        return SGMatrix<float64_t>();
    }
}

float64_t CGaussianARDKernel::distance(int32_t idx_a, int32_t idx_b)
{
    REQUIRE(lhs, "Left features (lhs) not set!\n")
    REQUIRE(rhs, "Right features (rhs) not set!\n")

    if (lhs==rhs && idx_a==idx_b)
        return 0.0;

    SGVector<float64_t> avec=get_feature_vector(idx_a, lhs);
    SGVector<float64_t> bvec=get_feature_vector(idx_b, rhs);
    avec=linalg::add(avec, bvec, 1.0, -1.0);
    float64_t result=compute_helper(avec, avec);
    return result/m_width;
}
#endif /* HAVE_LINALG_LIB */
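
A minimal usage sketch (added for illustration; it is not part of the file above). It assumes a Shogun 4.1 build with HAVE_LINALG_LIB and dense real-valued features; names such as feats and k01 are placeholders:

#include <shogun/base/init.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/kernel/GaussianARDKernel.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // 2-dimensional toy data with 3 feature vectors (one per column)
    SGMatrix<float64_t> data(2, 3);
    data.set_const(1.0);
    data(0, 1) = 2.0;
    data(1, 2) = -1.0;
    CDenseFeatures<float64_t>* feats = new CDenseFeatures<float64_t>(data);

    // cache size 10, kernel width 2.0 (see the HAVE_LINALG_LIB constructor above)
    CGaussianARDKernel* kernel = new CGaussianARDKernel(10, 2.0);
    kernel->init(feats, feats);

    // evaluates exp(-distance(0,1)) as implemented in compute() above
    float64_t k01 = kernel->kernel(0, 1);
    SG_SPRINT("k(0,1)=%f\n", k01);

    SG_UNREF(kernel);
    exit_shogun();
    return 0;
}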