ExponentialARDKernel.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2015 Wu Lin
 * Written (W) 2012 Jacob Walker
 *
 * Adapted from WeightedDegreeRBFKernel.cpp
 */

#include <shogun/kernel/ExponentialARDKernel.h>
#include <shogun/mathematics/Math.h>
#include <shogun/features/DenseFeatures.h>

#ifdef HAVE_LINALG_LIB
#include <shogun/mathematics/linalg/linalg.h>
#endif

using namespace shogun;

CExponentialARDKernel::CExponentialARDKernel() : CDotKernel()
{
	init();
}

CExponentialARDKernel::~CExponentialARDKernel()
{
	CKernel::cleanup();
}

void CExponentialARDKernel::init()
{
	m_ARD_type=KT_SCALAR;

	m_log_weights=SGVector<float64_t>(1);
	m_log_weights.set_const(0.0);

	m_weights_rows=1.0;
	m_weights_cols=1.0;

	SG_ADD(&m_log_weights, "log_weights", "Feature weights in log domain", MS_AVAILABLE,
		GRADIENT_AVAILABLE);

	SG_ADD(&m_weights_rows, "weights_rows", "Row of feature weights", MS_NOT_AVAILABLE);
	SG_ADD(&m_weights_cols, "weights_cols", "Column of feature weights", MS_NOT_AVAILABLE);
	SG_ADD((int *)(&m_ARD_type), "type", "ARD kernel type", MS_NOT_AVAILABLE);

	m_weights_raw=SGMatrix<float64_t>();
	SG_ADD(&m_weights_raw, "weights_raw", "Feature weights in standard domain", MS_NOT_AVAILABLE);
}
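
// All weights are registered via SG_ADD so that model selection and
// gradient-based hyperparameter learning can see them; only "log_weights" is
// flagged MS_AVAILABLE/GRADIENT_AVAILABLE, since optimization happens in the
// log domain, where positivity of the weights is enforced automatically.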

SGVector<float64_t> CExponentialARDKernel::get_feature_vector(int32_t idx, CFeatures* hs)
{
	REQUIRE(hs, "Features not set!\n");
	CDenseFeatures<float64_t>* dense_hs=dynamic_cast<CDenseFeatures<float64_t>*>(hs);
	if (dense_hs)
		return dense_hs->get_feature_vector(idx);

	CDotFeatures* dot_hs=dynamic_cast<CDotFeatures*>(hs);
	REQUIRE(dot_hs, "Kernel only supports DotFeatures\n");
	return dot_hs->get_computed_dot_feature_vector(idx);
}
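
// Dense features expose their vectors directly; any other CDotFeatures
// subclass (e.g. sparse features) falls back to
// get_computed_dot_feature_vector(), which is correct but may copy.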

#ifdef HAVE_LINALG_LIB

void CExponentialARDKernel::set_weights(SGMatrix<float64_t> weights)
{
	REQUIRE(weights.num_rows>0 && weights.num_cols>0, "Weights matrix should be non-empty\n");
	if (weights.num_rows==1)
	{
		if (weights.num_cols>1)
		{
			SGVector<float64_t> vec(weights.matrix,weights.num_cols,false);
			set_vector_weights(vec);
		}
		else
			set_scalar_weights(weights[0]);
	}
	else
		set_matrix_weights(weights);
}
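
// set_weights() dispatches on the shape of its argument: a 1x1 matrix selects
// scalar ARD, a 1xd row selects one weight per dimension, and a general
// matrix selects full ARD. A minimal sketch of the three cases (hypothetical
// values, assuming a 3-dimensional feature space):
//
//   SGMatrix<float64_t> s(1,1); s(0,0)=2.0;        // scalar weight
//   kernel->set_weights(s);                        // == set_scalar_weights(2.0)
//
//   SGMatrix<float64_t> v(1,3); v.set_const(1.5);  // one weight per dimension
//   kernel->set_weights(v);                        // == set_vector_weights(...)
//
//   SGMatrix<float64_t> m(3,3); m.set_const(0.1);
//   for (index_t i=0; i<3; i++) m(i,i)=1.0;        // positive diagonal
//   kernel->set_weights(m);                        // == set_matrix_weights(m)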

void CExponentialARDKernel::lazy_update_weights()
{
	if (parameter_hash_changed())
	{
		if (m_ARD_type==KT_SCALAR || m_ARD_type==KT_DIAG)
		{
			SGMatrix<float64_t> log_weights(m_log_weights.vector,1,m_log_weights.vlen,false);
			m_weights_raw=linalg::elementwise_compute(log_weights,
				[ ](float64_t& value)
				{
					return CMath::exp(value);
				});
		}
		else if (m_ARD_type==KT_FULL)
		{
			m_weights_raw=SGMatrix<float64_t>(m_weights_rows,m_weights_cols);
			m_weights_raw.set_const(0.0);
			index_t offset=0;
			for (int i=0; i<m_weights_raw.num_cols && i<m_weights_raw.num_rows; i++)
			{
				float64_t* begin=m_weights_raw.get_column_vector(i);
				std::copy(m_log_weights.vector+offset,
					m_log_weights.vector+offset+m_weights_raw.num_rows-i, begin+i);
				begin[i]=CMath::exp(begin[i]);
				offset+=m_weights_raw.num_rows-i;
			}
		}
		else
		{
			SG_ERROR("Unsupported ARD type\n");
		}
		update_parameter_hash();
	}
}
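
// Weights are stored in the log domain and only exponentiated back into
// m_weights_raw when a parameter actually changed (parameter_hash_changed()),
// so repeated get_weights() calls between optimization steps are cheap. For
// KT_FULL, only the diagonal of the lower-triangular factor is exponentiated;
// the off-diagonal entries are unconstrained and stored as-is.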

SGMatrix<float64_t> CExponentialARDKernel::get_weights()
{
	lazy_update_weights();
	return SGMatrix<float64_t>(m_weights_raw);
}

void CExponentialARDKernel::set_scalar_weights(float64_t weight)
{
	REQUIRE(weight>0, "Scalar weight (%f) should be positive\n", weight);
	m_log_weights=SGVector<float64_t>(1);
	m_log_weights.set_const(CMath::log(weight));
	m_ARD_type=KT_SCALAR;

	m_weights_rows=1.0;
	m_weights_cols=1.0;
}
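
// Example: set_scalar_weights(2.0) stores log(2.0) in m_log_weights[0], and a
// later get_weights() returns exp(log(2.0))==2.0 as a 1x1 matrix.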

void CExponentialARDKernel::set_vector_weights(SGVector<float64_t> weights)
{
	REQUIRE(rhs==NULL && lhs==NULL,
		"Vector weights must be set before features are initialized\n");
	REQUIRE(weights.vlen>0, "Vector weight should be non-empty\n");
	m_log_weights=SGVector<float64_t>(weights.vlen);
	for (index_t i=0; i<weights.vlen; i++)
	{
		REQUIRE(weights[i]>0, "Each entry of vector weight (v[%d]=%f) should be positive\n",
			i, weights[i]);
		m_log_weights[i]=CMath::log(weights[i]);
	}
	m_ARD_type=KT_DIAG;

	m_weights_rows=1.0;
	m_weights_cols=weights.vlen;
}
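
// With vector weights, each input dimension gets its own positive scale; this
// is the classic ARD parameterization: dimensions whose learned weight
// shrinks toward zero contribute little to the kernel value.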

void CExponentialARDKernel::set_matrix_weights(SGMatrix<float64_t> weights)
{
	REQUIRE(rhs==NULL && lhs==NULL,
		"Matrix weights must be set before features are initialized\n");
	REQUIRE(weights.num_cols>0, "Matrix weight should be non-empty\n");
	REQUIRE(weights.num_rows>=weights.num_cols,
		"Number of rows (%d) must not be less than number of columns (%d)\n",
		weights.num_rows, weights.num_cols);

	m_weights_rows=weights.num_rows;
	m_weights_cols=weights.num_cols;
	m_ARD_type=KT_FULL;
	index_t len=(2*m_weights_rows+1-m_weights_cols)*m_weights_cols/2;
	m_log_weights=SGVector<float64_t>(len);

	index_t offset=0;
	for (int i=0; i<weights.num_cols && i<weights.num_rows; i++)
	{
		float64_t* begin=weights.get_column_vector(i);
		REQUIRE(begin[i]>0, "The diagonal entry of matrix weight (w(%d,%d)=%f) should be positive\n",
			i, i, begin[i]);
		std::copy(begin+i, begin+weights.num_rows, m_log_weights.vector+offset);
		m_log_weights[offset]=CMath::log(m_log_weights[offset]);
		offset+=weights.num_rows-i;
	}
}
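
// The lower triangle of the weight matrix is packed column-by-column into
// m_log_weights; only the diagonal entries are log-transformed (they must
// stay positive), while off-diagonal entries are stored unconstrained. The
// packed length is len=(2*rows+1-cols)*cols/2. Worked example for a 3x2
// matrix: column 0 contributes 3 entries and column 1 contributes 2, so
// len=(2*3+1-2)*2/2=5.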

CExponentialARDKernel::CExponentialARDKernel(int32_t size) : CDotKernel(size)
{
	init();
}

CExponentialARDKernel::CExponentialARDKernel(CDotFeatures* l,
	CDotFeatures* r, int32_t size) : CDotKernel(size)
{
	init();
	init(l, r);
}

bool CExponentialARDKernel::init(CFeatures* l, CFeatures* r)
{
	cleanup();
	CDotKernel::init(l, r);
	int32_t dim=((CDotFeatures*) l)->get_dim_feature_space();
	if (m_ARD_type==KT_FULL)
	{
		REQUIRE(m_weights_rows==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
			dim, (int32_t)m_weights_rows);
	}
	else if (m_ARD_type==KT_DIAG)
	{
		REQUIRE(m_log_weights.vlen==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
			dim, m_log_weights.vlen);
	}
	return init_normalizer();
}

SGMatrix<float64_t> CExponentialARDKernel::get_weighted_vector(SGVector<float64_t> vec)
{
	REQUIRE(m_ARD_type==KT_FULL || m_ARD_type==KT_DIAG,
		"This method only supports vector weights or matrix weights\n");
	SGMatrix<float64_t> res;
	if (m_ARD_type==KT_FULL)
	{
		res=SGMatrix<float64_t>(m_weights_rows,1);
		index_t offset=0;
		//can be done in parallel
		for (int i=0; i<m_weights_rows && i<m_weights_cols; i++)
		{
			SGMatrix<float64_t> weights(m_log_weights.vector+offset,1,m_weights_rows-i,false);
			weights[0]=CMath::exp(weights[0]);
			SGMatrix<float64_t> rtmp(vec.vector+i,vec.vlen-i,1,false);
			SGMatrix<float64_t> s=linalg::matrix_product(weights,rtmp);
			weights[0]=CMath::log(weights[0]);
			res[i]=s[0];
			offset+=m_weights_rows-i;
		}
	}
	else
	{
		SGMatrix<float64_t> rtmp(vec.vector,vec.vlen,1,false);
		SGMatrix<float64_t> weights=linalg::elementwise_compute(m_log_weights,
			[ ](float64_t& value)
			{
				return CMath::exp(value);
			});
		res=linalg::elementwise_product(weights, rtmp);
	}
	return res;
}
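
// For KT_FULL, res[i] is the dot product of the i-th packed weight column
// (exponentiating its leading, log-domain diagonal entry on the fly, then
// restoring it) with the tail vec[i:]; for KT_DIAG, the result is the plain
// elementwise product exp(log_weights).*vec.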

SGMatrix<float64_t> CExponentialARDKernel::compute_right_product(SGVector<float64_t> vec,
	float64_t& scalar_weight)
{
	SGMatrix<float64_t> right;

	if (m_ARD_type==KT_SCALAR)
	{
		right=SGMatrix<float64_t>(vec.vector,vec.vlen,1,false);
		scalar_weight*=CMath::exp(m_log_weights[0]);
	}
	else if (m_ARD_type==KT_DIAG || m_ARD_type==KT_FULL)
		right=get_weighted_vector(vec);
	else
	{
		SG_ERROR("Unsupported ARD type\n");
	}
	return right;
}
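
// For scalar ARD the weight factors out of the dot product, so the vector is
// passed through untouched and the caller's scalar_weight is multiplied by
// exp(m_log_weights[0]) instead; vector and matrix ARD must reweight the
// vector itself via get_weighted_vector().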

void CExponentialARDKernel::check_weight_gradient_index(index_t index)
{
	REQUIRE(lhs, "Left features not set!\n");
	REQUIRE(rhs, "Right features not set!\n");

	if (m_ARD_type!=KT_SCALAR)
	{
		REQUIRE(index>=0, "Index (%d) must be non-negative\n", index);
		REQUIRE(index<m_log_weights.vlen, "Index (%d) must be within #dimension of weights (%d)\n",
			index, m_log_weights.vlen);
	}
}
#endif //HAVE_LINALG_LIB
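
// A minimal usage sketch (an assumption for illustration: a LINALG-enabled
// build and the concrete subclass CGaussianARDKernel, which derives from this
// class; error handling omitted):
//
//   #include <shogun/features/DenseFeatures.h>
//   #include <shogun/kernel/GaussianARDKernel.h>
//
//   SGMatrix<float64_t> data(3, 10);             // 3 dimensions, 10 vectors
//   data.set_const(1.0);
//   CDenseFeatures<float64_t>* feats=new CDenseFeatures<float64_t>(data);
//   CGaussianARDKernel* kernel=new CGaussianARDKernel();
//   SGVector<float64_t> w(3); w.set_const(0.5);  // one weight per dimension
//   kernel->set_vector_weights(w);               // must precede init()
//   kernel->init(feats, feats);
//   SGMatrix<float64_t> K=kernel->get_kernel_matrix();
//   SG_UNREF(kernel);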