SHOGUN  4.1.0
LMNN.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2013 Fernando J. Iglesias Garcia
 * Copyright (C) 2013 Fernando J. Iglesias Garcia
 */

#include <shogun/metric/LMNN.h>

#ifdef HAVE_EIGEN3

#include <shogun/metric/LMNNImpl.h>
#include <shogun/distance/CustomMahalanobisDistance.h>

// useful shorthand to perform operations with Eigen matrices
// trace of the product of two matrices computed fast using trace(A*B)=sum(A.*B')
#define TRACE(A,B) (((A).array()*(B).transpose().array()).sum())
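// Note: TRACE exploits the identity trace(A*B) = sum_{i,j} A(i,j)*B(j,i), i.e. the sum
// of the element-wise product of A with B transposed, so the full O(n^3) product is
// never formed. As an illustration only (standalone Eigen snippet, not part of this
// file; needs <Eigen/Dense>, <cassert> and <cmath>):
//
//     Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
//     Eigen::MatrixXd B = Eigen::MatrixXd::Random(5, 5);
//     double slow = (A * B).trace();                            // O(n^3)
//     double fast = (A.array() * B.transpose().array()).sum();  // O(n^2), as TRACE(A,B)
//     assert(std::abs(slow - fast) < 1e-12);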

using namespace shogun;
using namespace Eigen;

CLMNN::CLMNN()
{
    init();

    m_statistics = new CLMNNStatistics();
    SG_REF(m_statistics);
}

CLMNN::CLMNN(CDenseFeatures<float64_t>* features, CMulticlassLabels* labels, int32_t k)
{
    init();

    m_features = features;
    m_labels = labels;
    m_k = k;

    SG_REF(m_features)
    SG_REF(m_labels)

    m_statistics = new CLMNNStatistics();
    SG_REF(m_statistics);
}

CLMNN::~CLMNN()
{
    SG_UNREF(m_features)
    SG_UNREF(m_labels)
    SG_UNREF(m_statistics);
}

const char* CLMNN::get_name() const
{
    return "LMNN";
}

void CLMNN::train(SGMatrix<float64_t> init_transform)
{
    SG_DEBUG("Entering CLMNN::train().\n")

    // Check training data and arguments, initializing, if necessary, init_transform
    CLMNNImpl::check_training_setup(m_features, m_labels, init_transform);

    // Initializations

    // cast is safe, check_training_setup ensures features are dense
    CDenseFeatures<float64_t>* x = static_cast<CDenseFeatures<float64_t>*>(m_features);
    CMulticlassLabels* y = CLabelsFactory::to_multiclass(m_labels);
    SG_DEBUG("%d input vectors with %d dimensions.\n", x->get_num_vectors(), x->get_num_features());

    // Use Eigen matrix for the linear transform L. The Mahalanobis distance is L^T*L
    MatrixXd L = Map<MatrixXd>(init_transform.matrix, init_transform.num_rows,
            init_transform.num_cols);
    // Compute target or genuine neighbours
    SG_DEBUG("Finding target nearest neighbors.\n")
    SGMatrix<index_t> target_nn = CLMNNImpl::find_target_nn(x, y, m_k);
    // Initialize (sub-)gradient
    SG_DEBUG("Summing outer products for (sub-)gradient initialization.\n")
    MatrixXd gradient = (1-m_regularization)*CLMNNImpl::sum_outer_products(x, target_nn);
    // Value of the objective function at every iteration
    SGVector<float64_t> obj(m_maxiter);
    // The step size is modified depending on how the objective changes, leave the
    // step size member unchanged and use a local one
    float64_t stepsize = m_stepsize;
    // Last active set of impostors computed exactly, current and previous impostors sets
    ImpostorsSetType exact_impostors, cur_impostors, prev_impostors;
    // Iteration counter
    uint32_t iter = 0;
    // Criterion for termination
    bool stop = false;
    // Make space for the training statistics
    m_statistics->resize(m_maxiter);

    // Main loop
    while (!stop)
    {
        SG_PROGRESS(iter, 0, m_maxiter)

        // Find current set of impostors
        SG_DEBUG("Finding impostors.\n")
        cur_impostors = CLMNNImpl::find_impostors(x,y,L,target_nn,iter,m_correction);
        SG_DEBUG("Found %d impostors in the current set.\n", cur_impostors.size())

        // (Sub-)gradient computation
        SG_DEBUG("Updating gradient.\n")
        CLMNNImpl::update_gradient(x, gradient, cur_impostors, prev_impostors, m_regularization);
        // Take gradient step
        SG_DEBUG("Taking gradient step.\n")
        CLMNNImpl::gradient_step(L, gradient, stepsize, m_diagonal);

        // Compute the objective, trace of Mahalanobis distance matrix (L squared) times the gradient
        // plus the number of current impostors to account for the margin
        SG_DEBUG("Computing objective.\n")
        obj[iter] = TRACE(L.transpose()*L,gradient) + m_regularization*cur_impostors.size();
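        // For reference, this evaluates the standard LMNN objective (Weinberger & Saul,
        // 2009), with mu playing the role of m_regularization:
        //
        //   eps(L) = (1-mu) * sum_{j~>i} ||L(x_i - x_j)||^2
        //          + mu * sum_{j~>i} sum_l (1 - y_il) * [1 + ||L(x_i - x_j)||^2 - ||L(x_i - x_l)||^2]_+
        //
        // TRACE(L^T*L, gradient) covers the squared-distance terms accumulated in the
        // (sub-)gradient, and m_regularization*|impostors| adds the unit margin of each
        // active hinge term.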

        // Correct step size
        CLMNNImpl::correct_stepsize(stepsize, obj, iter);

        // Check termination criterion
        stop = CLMNNImpl::check_termination(stepsize, obj, iter, m_maxiter, m_stepsize_threshold, m_obj_threshold);

        // Update iteration counter
        iter = iter + 1;
        // Update previous set of impostors
        prev_impostors = cur_impostors;

        // Store statistics for this iteration
        m_statistics->set(iter-1, obj[iter-1], stepsize, cur_impostors.size());

        SG_DEBUG("iteration=%d, objective=%.4f, #impostors=%4d, stepsize=%.4E\n",
                iter, obj[iter-1], cur_impostors.size(), stepsize)
    }

    // Truncate statistics in case convergence was reached in less than maxiter
    m_statistics->resize(iter);

    // Store the transformation found in the class attribute
    int32_t nfeats = x->get_num_features();
    float64_t* cloned_data = SGMatrix<float64_t>::clone_matrix(L.data(), nfeats, nfeats);
    m_linear_transform = SGMatrix<float64_t>(cloned_data, nfeats, nfeats);

    SG_DEBUG("Leaving CLMNN::train().\n")
}

SGMatrix<float64_t> CLMNN::get_linear_transform() const
{
    return m_linear_transform;
}

CCustomMahalanobisDistance* CLMNN::get_distance() const
{
    // Compute Mahalanobis distance matrix M = L^T*L

    // Put the linear transform L in Eigen to perform the matrix multiplication
    // L is not copied to another region of memory
    Map<const MatrixXd> map_linear_transform(m_linear_transform.matrix,
            m_linear_transform.num_rows, m_linear_transform.num_cols);
    // TODO exploit that M is symmetric
    MatrixXd M = map_linear_transform.transpose()*map_linear_transform;
    // TODO avoid copying
    SGMatrix<float64_t> mahalanobis_matrix(M.rows(), M.cols());
    for (index_t i = 0; i < M.rows(); i++)
        for (index_t j = 0; j < M.cols(); j++)
            mahalanobis_matrix(i,j) = M(i,j);

    // Create custom Mahalanobis distance with matrix M associated with the training features

    CCustomMahalanobisDistance* distance =
            new CCustomMahalanobisDistance(m_features, m_features, mahalanobis_matrix);
    SG_REF(distance)

    return distance;
}
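
// The returned distance can be plugged into any distance-based machine in Shogun. A
// rough sketch (assuming CKNN's (k, distance, labels) constructor from
// shogun/multiclass/KNN.h; lmnn and labels stand for a trained CLMNN object and its
// training labels):
//
//     CCustomMahalanobisDistance* dist = lmnn->get_distance();
//     CKNN* knn = new CKNN(3, dist, labels);
//     knn->train();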

int32_t CLMNN::get_k() const
{
    return m_k;
}

void CLMNN::set_k(const int32_t k)
{
    REQUIRE(k>0, "The number of target neighbors per example must be larger than zero\n");
    m_k = k;
}

float64_t CLMNN::get_regularization() const
{
    return m_regularization;
}

void CLMNN::set_regularization(const float64_t regularization)
{
    m_regularization = regularization;
}

float64_t CLMNN::get_stepsize() const
{
    return m_stepsize;
}

void CLMNN::set_stepsize(const float64_t stepsize)
{
    REQUIRE(stepsize>0, "The step size used in gradient descent must be larger than zero\n")
    m_stepsize = stepsize;
}

float64_t CLMNN::get_stepsize_threshold() const
{
    return m_stepsize_threshold;
}

void CLMNN::set_stepsize_threshold(const float64_t stepsize_threshold)
{
    REQUIRE(stepsize_threshold>0,
            "The threshold for the step size must be larger than zero\n")
    m_stepsize_threshold = stepsize_threshold;
}

uint32_t CLMNN::get_maxiter() const
{
    return m_maxiter;
}

void CLMNN::set_maxiter(const uint32_t maxiter)
{
    REQUIRE(maxiter>0, "The number of maximum iterations must be larger than zero\n")
    m_maxiter = maxiter;
}

uint32_t CLMNN::get_correction() const
{
    return m_correction;
}

void CLMNN::set_correction(const uint32_t correction)
{
    m_correction = correction;
}

float64_t CLMNN::get_obj_threshold() const
{
    return m_obj_threshold;
}

void CLMNN::set_obj_threshold(const float64_t obj_threshold)
{
    REQUIRE(obj_threshold>0,
            "The threshold for the objective must be larger than zero\n")
    m_obj_threshold = obj_threshold;
}

bool CLMNN::get_diagonal() const
{
    return m_diagonal;
}

void CLMNN::set_diagonal(const bool diagonal)
{
    m_diagonal = diagonal;
}

CLMNNStatistics* CLMNN::get_statistics() const
{
    SG_REF(m_statistics);
    return m_statistics;
}

void CLMNN::init()
{
    SG_ADD(&m_linear_transform, "linear_transform",
            "Linear transform in matrix form", MS_NOT_AVAILABLE)
    SG_ADD((CSGObject**) &m_features, "features", "Training features",
            MS_NOT_AVAILABLE)
    SG_ADD((CSGObject**) &m_labels, "labels", "Training labels",
            MS_NOT_AVAILABLE)
    SG_ADD(&m_k, "k", "Number of target neighbours per example",
            MS_NOT_AVAILABLE)
    SG_ADD(&m_regularization, "regularization", "Regularization",
            MS_AVAILABLE)
    SG_ADD(&m_stepsize, "stepsize", "Step size in gradient descent",
            MS_NOT_AVAILABLE)
    SG_ADD(&m_stepsize_threshold, "stepsize_threshold", "Step size threshold",
            MS_NOT_AVAILABLE)
    SG_ADD(&m_maxiter, "maxiter", "Maximum number of iterations",
            MS_NOT_AVAILABLE)
    SG_ADD(&m_correction, "correction",
            "Iterations between exact impostors search", MS_NOT_AVAILABLE)
    SG_ADD(&m_obj_threshold, "obj_threshold", "Objective threshold",
            MS_NOT_AVAILABLE)
    SG_ADD(&m_diagonal, "m_diagonal", "Diagonal transformation", MS_NOT_AVAILABLE);
    SG_ADD((CSGObject**) &m_statistics, "statistics", "Training statistics",
            MS_NOT_AVAILABLE);

    m_features = NULL;
    m_labels = NULL;
    m_k = 1;
    m_regularization = 0.5;
    m_stepsize = 1e-07;
    m_stepsize_threshold = 1e-22;
    m_maxiter = 1000;
    m_correction = 15;
    m_obj_threshold = 1e-9;
    m_diagonal = false;
    m_statistics = NULL;
}

CLMNNStatistics::CLMNNStatistics()
{
    init();
}

CLMNNStatistics::~CLMNNStatistics()
{
}

const char* CLMNNStatistics::get_name() const
{
    return "LMNNStatistics";
}

void CLMNNStatistics::resize(int32_t size)
{
    REQUIRE(size > 0, "The new size in CLMNNStatistics::resize must be larger than zero."
            " Given value is %d.\n", size);

    obj.resize_vector(size);
    stepsize.resize_vector(size);
    num_impostors.resize_vector(size);
}

void CLMNNStatistics::set(index_t iter, float64_t obj_iter, float64_t stepsize_iter,
        uint32_t num_impostors_iter)
{
    REQUIRE(iter >= 0 && iter < obj.vlen, "The iteration index in CLMNNStatistics::set "
            "must be larger or equal to zero and less than the size (%d). Given value is %d.\n", obj.vlen, iter);

    obj[iter] = obj_iter;
    stepsize[iter] = stepsize_iter;
    num_impostors[iter] = num_impostors_iter;
}

void CLMNNStatistics::init()
{
    SG_ADD(&obj, "obj", "Objective at each iteration", MS_NOT_AVAILABLE);
    SG_ADD(&stepsize, "stepsize", "Step size at each iteration", MS_NOT_AVAILABLE);
    SG_ADD(&num_impostors, "num_impostors", "Number of impostors at each iteration",
            MS_NOT_AVAILABLE);
}

#endif /* HAVE_EIGEN3 */
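
For context, here is a minimal usage sketch of the class documented above. It is illustrative only: it assumes Shogun 4.1.0's modular C++ API (init_shogun_with_defaults, CDenseFeatures, CMulticlassLabels, and CKNN's (k, distance, labels) constructor), and the toy dataset and names such as feat_mat and lab_vec are invented placeholders.

#include <shogun/base/init.h>
#include <shogun/io/SGIO.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/MulticlassLabels.h>
#include <shogun/metric/LMNN.h>
#include <shogun/distance/CustomMahalanobisDistance.h>
#include <shogun/multiclass/KNN.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // toy data: four 2-dimensional examples (one per column), two classes
    SGMatrix<float64_t> feat_mat(2, 4);
    feat_mat(0,0)=0.0; feat_mat(1,0)=0.0;
    feat_mat(0,1)=0.1; feat_mat(1,1)=0.1;
    feat_mat(0,2)=1.0; feat_mat(1,2)=1.0;
    feat_mat(0,3)=1.1; feat_mat(1,3)=1.1;

    SGVector<float64_t> lab_vec(4);
    lab_vec[0]=0; lab_vec[1]=0; lab_vec[2]=1; lab_vec[3]=1;

    CDenseFeatures<float64_t>* features = new CDenseFeatures<float64_t>(feat_mat);
    CMulticlassLabels* labels = new CMulticlassLabels(lab_vec);

    // learn the linear transform with one target neighbour per example
    CLMNN* lmnn = new CLMNN(features, labels, 1);
    lmnn->set_maxiter(200);
    lmnn->train();  // default (empty) initial transform; see CLMNNImpl::check_training_setup

    SGMatrix<float64_t> L = lmnn->get_linear_transform();
    L.display_matrix("L");

    // k-NN classification under the learnt Mahalanobis metric M = L^T*L
    CCustomMahalanobisDistance* dist = lmnn->get_distance();
    CKNN* knn = new CKNN(1, dist, labels);
    knn->train();
    SG_UNREF(dist);  // knn holds its own reference to the distance

    // per-iteration statistics recorded during training (obj, stepsize and
    // num_impostors are SGVector members of CLMNNStatistics, see LMNN.h)
    CLMNNStatistics* stats = lmnn->get_statistics();
    SG_SPRINT("iterations run: %d\n", stats->obj.vlen);

    SG_UNREF(stats);
    SG_UNREF(knn);
    SG_UNREF(lmnn);
    exit_shogun();
    return 0;
}

get_distance() wraps the learnt metric M = L^T*L in a CCustomMahalanobisDistance tied to the training features, so the same object can be handed to any distance-based machine, while get_statistics() exposes the per-iteration objective, step size and impostor counts that train() stores through CLMNNStatistics::set.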