SHOGUN 4.0.0
LinearARDKernel.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2015 Wu Lin
 * Written (W) 2012 Jacob Walker
 *
 * Adapted from WeightedDegreeRBFKernel.cpp
 */

#include <shogun/kernel/LinearARDKernel.h>

#ifdef HAVE_LINALG_LIB
#include <shogun/mathematics/linalg/linalg.h>
#endif

using namespace shogun;
20 
22 {
23  initialize();
24 }
25 
27 {
29 }
30 
void CLinearARDKernel::initialize()
{
    m_ARD_type=KT_SCALAR;
    m_weights=SGMatrix<float64_t>(1,1);
    m_weights.set_const(1.0);
    SG_ADD(&m_weights, "weights", "Feature weights", MS_AVAILABLE,
            GRADIENT_AVAILABLE);
}

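/* The constructors taking features and all weight/kernel computations below
 * rely on the linalg library and are only compiled when HAVE_LINALG_LIB is
 * defined. */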
#ifdef HAVE_LINALG_LIB
CLinearARDKernel::CLinearARDKernel(int32_t size) : CDotKernel(size)
{
    initialize();
}

CLinearARDKernel::CLinearARDKernel(CDotFeatures* l,
        CDotFeatures* r, int32_t size) : CDotKernel(size)
{
    initialize();
    init(l,r);
}

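/* init() checks that the weight shape matches the feature dimension dim:
 * KT_SCALAR needs no check (1-by-1 weight), KT_DIAG expects a dim-by-1
 * vector, and KT_FULL expects a matrix with dim columns. */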
bool CLinearARDKernel::init(CFeatures* l, CFeatures* r)
{
    cleanup();
    CDotKernel::init(l, r);
    int32_t dim=((CDotFeatures*) l)->get_dim_feature_space();
    if (m_ARD_type==KT_FULL)
    {
        REQUIRE(m_weights.num_cols==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
                dim, m_weights.num_cols);
    }
    else if (m_ARD_type==KT_DIAG)
    {
        REQUIRE(m_weights.num_rows==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
                dim, m_weights.num_rows);
    }
    return init_normalizer();
}

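/* compute_right_product() applies the ARD weights to the right-hand feature
 * vector: for KT_SCALAR it only folds m_weights[0] into scalar_weight, for
 * KT_DIAG it returns the elementwise product of the weights with the vector,
 * and for KT_FULL it returns the matrix-vector product m_weights*right_vec. */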
SGMatrix<float64_t> CLinearARDKernel::compute_right_product(SGVector<float64_t> right_vec,
    float64_t & scalar_weight)
{
    SGMatrix<float64_t> right;

    if (m_ARD_type==KT_SCALAR)
    {
        right=SGMatrix<float64_t>(right_vec.vector,right_vec.vlen,1,false);
        scalar_weight*=m_weights[0];
    }
    else
    {
        right=SGMatrix<float64_t>(m_weights.num_rows,1);

        SGMatrix<float64_t> rtmp(right_vec.vector,right_vec.vlen,1,false);

        if(m_ARD_type==KT_DIAG)
            linalg::elementwise_product(m_weights, rtmp, right);
        else if(m_ARD_type==KT_FULL)
            linalg::matrix_product(m_weights, rtmp, right);
        else
            SG_ERROR("Unsupported ARD type\n");
    }
    return right;
}

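/* compute_helper() evaluates the kernel value for two feature vectors x and y:
 *   KT_SCALAR: k(x,y) = w^2 * (x . y)
 *   KT_DIAG:   k(x,y) = sum_i w_i^2 * x_i * y_i
 *   KT_FULL:   k(x,y) = (W*x) . (W*y)
 * The weighted left vector is built in place; the weighted right vector comes
 * from compute_right_product(). */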
float64_t CLinearARDKernel::compute_helper(SGVector<float64_t> avec, SGVector<float64_t> bvec)
{
    SGMatrix<float64_t> left;

    float64_t scalar_weight=1.0;
    if (m_ARD_type==KT_SCALAR)
    {
        left=SGMatrix<float64_t>(avec.vector,1,avec.vlen,false);
        scalar_weight=m_weights[0];
    }
    else
    {
        left=SGMatrix<float64_t>(1,m_weights.num_rows);

        SGMatrix<float64_t> ltmp(avec.vector,avec.vlen,1,false);

        SGMatrix<float64_t> left_transpose(left.matrix,left.num_cols,1,false);
        if(m_ARD_type==KT_DIAG)
            linalg::elementwise_product(m_weights, ltmp, left_transpose);
        else if(m_ARD_type==KT_FULL)
            linalg::matrix_product(m_weights, ltmp, left_transpose);
        else
            SG_ERROR("Unsupported ARD type\n");
    }

    SGMatrix<float64_t> res(1,1);
    SGMatrix<float64_t> right=compute_right_product(bvec, scalar_weight);
    linalg::matrix_product(left, right, res);
    return res[0]*scalar_weight;
}

float64_t CLinearARDKernel::compute(int32_t idx_a, int32_t idx_b)
{
    REQUIRE(lhs && rhs, "Features not set!\n")

    SGVector<float64_t> avec=((CDotFeatures *)lhs)->get_computed_dot_feature_vector(idx_a);
    SGVector<float64_t> bvec=((CDotFeatures *)rhs)->get_computed_dot_feature_vector(idx_b);

    return compute_helper(avec, bvec);
}

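/* compute_gradient_helper() returns the derivative of k(x,y) with respect to a
 * single weight entry, multiplied by 'scale':
 *   KT_SCALAR: dk/dw      = 2*w*(x . y)
 *   KT_DIAG:   dk/dw_i    = 2*w_i*x_i*y_i
 *   KT_FULL:   dk/dW_(ij) = (W*x)_i*y_j + (W*y)_i*x_j
 * where (i,j) is recovered from the column-major linear index into m_weights. */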
float64_t CLinearARDKernel::compute_gradient_helper(SGVector<float64_t> avec,
    SGVector<float64_t> bvec, float64_t scale, index_t index)
{
    float64_t result;

    if(m_ARD_type==KT_DIAG)
    {
        result=2.0*avec[index]*bvec[index]*m_weights[index];
    }
    else
    {
        SGMatrix<float64_t> left(avec.vector,1,avec.vlen,false);
        SGMatrix<float64_t> right(bvec.vector,bvec.vlen,1,false);
        SGMatrix<float64_t> res(1,1);

        if (m_ARD_type==KT_SCALAR)
        {
            linalg::matrix_product(left, right, res);
            result=2.0*res[0]*m_weights[0];
        }
        else if(m_ARD_type==KT_FULL)
        {
            int32_t row_index=index%m_weights.num_rows;
            int32_t col_index=index/m_weights.num_rows;
            //index is a linearized index of m_weights (column-major)
            //m_weights is a d-by-p matrix, where p is #dimension of features
            SGVector<float64_t> row_vec=m_weights.get_row_vector(row_index);
            SGMatrix<float64_t> row_vec_r(row_vec.vector,row_vec.vlen,1,false);

            linalg::matrix_product(left, row_vec_r, res);
            result=res[0]*bvec[col_index];

            SGMatrix<float64_t> row_vec_l(row_vec.vector,1,row_vec.vlen,false);
            linalg::matrix_product(row_vec_l, right, res);
            result+=res[0]*avec[col_index];
        }
        else
        {
            SG_ERROR("Unsupported ARD type\n");
        }
    }
    return result*scale;
}

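/* get_parameter_gradient() builds the num_lhs-by-num_rhs matrix of derivatives
 * of all pairwise kernel values with respect to the weight entry selected by
 * 'index' (a column-major linear index into m_weights). Only the "weights"
 * parameter is supported. */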
SGMatrix<float64_t> CLinearARDKernel::get_parameter_gradient(
    const TParameter* param, index_t index)
{
    REQUIRE(lhs && rhs, "Features not set!\n");

    int32_t row_index, col_index;
    if (m_ARD_type!=KT_SCALAR)
    {
        REQUIRE(index>=0, "Index (%d) must be non-negative\n",index);
        if (m_ARD_type==KT_DIAG)
        {
            REQUIRE(index<m_weights.num_rows, "Index (%d) must be within #dimension of weights (%d)\n",
                    index, m_weights.num_rows);
        }
        else if(m_ARD_type==KT_FULL)
        {
            row_index=index%m_weights.num_rows;
            col_index=index/m_weights.num_rows;
            REQUIRE(row_index<m_weights.num_rows,
                    "Row index (%d) must be within #row of weights (%d)\n",
                    row_index, m_weights.num_rows);
            REQUIRE(col_index<m_weights.num_cols,
                    "Column index (%d) must be within #column of weights (%d)\n",
                    col_index, m_weights.num_cols);
        }
    }
    if (!strcmp(param->m_name, "weights"))
    {
        SGMatrix<float64_t> derivative(num_lhs, num_rhs);

        for (index_t j=0; j<num_lhs; j++)
        {
            SGVector<float64_t> avec=((CDotFeatures *)lhs)->get_computed_dot_feature_vector(j);
            for (index_t k=0; k<num_rhs; k++)
            {
                SGVector<float64_t> bvec=((CDotFeatures *)rhs)->get_computed_dot_feature_vector(k);
                derivative(j,k)=compute_gradient_helper(avec, bvec, 1.0, index);
            }
        }
        return derivative;
    }
    else
    {
        SG_ERROR("Can't compute derivative wrt %s parameter\n", param->m_name);
        return SGMatrix<float64_t>();
    }
}

SGMatrix<float64_t> CLinearARDKernel::get_weights()
{
    return SGMatrix<float64_t>(m_weights);
}

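/* set_weights() infers the ARD type from the shape of the weight matrix:
 * more than one column selects KT_FULL, a single 1-by-1 entry selects
 * KT_SCALAR, and a column vector selects KT_DIAG. */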
void CLinearARDKernel::set_weights(SGMatrix<float64_t> weights)
{
    REQUIRE(weights.num_cols>0 && weights.num_rows>0,
            "Weight Matrix (%d-by-%d) must not be empty\n",
            weights.num_rows, weights.num_cols);
    if (weights.num_cols>1)
    {
        m_ARD_type=KT_FULL;
    }
    else
    {
        if (weights.num_rows==1)
        {
            m_ARD_type=KT_SCALAR;
        }
        else
        {
            m_ARD_type=KT_DIAG;
        }
    }
    m_weights=weights;
}

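/* Convenience setters: a single value, a vector, or a matrix of weights map to
 * KT_SCALAR, KT_DIAG and KT_FULL respectively via set_weights(). */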
void CLinearARDKernel::set_scalar_weights(float64_t weight)
{
    SGMatrix<float64_t> weights(1,1);
    weights(0,0)=weight;
    set_weights(weights);
}

void CLinearARDKernel::set_vector_weights(SGVector<float64_t> weights)
{
    SGMatrix<float64_t> weights_mat(weights.vlen,1);
    std::copy(weights.vector, weights.vector+weights.vlen, weights_mat.matrix);
    set_weights(weights_mat);
}

void CLinearARDKernel::set_matrix_weights(SGMatrix<float64_t> weights)
{
    set_weights(weights);
}
#endif //HAVE_LINALG_LIB
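/* Usage sketch (illustrative, not part of this file): building the kernel on
 * dense features and switching to per-dimension (KT_DIAG) weights.
 * CDenseFeatures and get_kernel_matrix() are assumed from the standard
 * Shogun 4.x API.
 *
 *   SGMatrix<float64_t> data(2, 10);              // 2 dimensions, 10 vectors
 *   data.set_const(1.0);
 *   CDenseFeatures<float64_t>* feats=new CDenseFeatures<float64_t>(data);
 *
 *   CLinearARDKernel* kernel=new CLinearARDKernel();
 *   SGVector<float64_t> w(2);
 *   w.set_const(0.5);
 *   kernel->set_vector_weights(w);                // selects KT_DIAG
 *   kernel->init(feats, feats);
 *
 *   SGMatrix<float64_t> K=kernel->get_kernel_matrix();
 *   SG_UNREF(kernel);
 */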
