23 using namespace shogun;
// NOTE(review): fragment of a constructor member-initializer list (ctor head
// not visible in this excerpt). Stores the store-covariances flag and zeroes
// the class count and feature dimensionality until train_machine() sets them.
27 m_store_covs(store_covs), m_num_classes(0), m_dim(0)
// ---- fragment: interior of the QDA apply/classification routine ----
// NOTE(review): excerpt is non-contiguous (original line numbers are embedded
// at the start of each line); several statements between them are not visible.
77 SG_ERROR(
"Specified features are not of type CDotFeatures\n");
// For each class k, center every input vector around that class's mean,
// storing the result column-major in X (num_vecs x m_dim).
99 for ( k = 0 ; k < m_num_classes ; ++k )
102 for ( i = 0 ; i < num_vecs ; ++i )
104 vec = rf->get_feature_vector(i, vlen, vfree);
107 for ( j = 0 ; j < m_dim ; ++j )
108 X[i + j*num_vecs] = vec[j] - m_means[k*m_dim + j];
110 rf->free_feature_vector(vec, i, vfree);
// A = X * (per-class transform); presumably the whitening matrix m_M for
// class k built during training -- TODO confirm (the dgemm operand list is
// truncated in this view).
114 cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, num_vecs, m_dim,
// Accumulate the squared Euclidean norm of each transformed row into
// norm2(i, k): the (scaled) Mahalanobis distance of vector i to class k.
118 for ( i = 0 ; i < num_vecs ; ++i )
119 for ( j = 0 ; j < m_dim ; ++j )
120 norm2[i + k*num_vecs] +=
CMath::sq(A[i + j*num_vecs]);
123 CMath::display_matrix(A.
matrix, num_vecs, m_dim,
"A");
// Convert distances to (scaled) log-likelihood scores: add the per-class
// log-determinant term m_slog[k], then multiply by -1/2.
127 for ( i = 0 ; i < num_vecs ; ++i )
128 for ( k = 0 ; k < m_num_classes ; ++k )
130 norm2[i + k*num_vecs] += m_slog[k];
131 norm2[i + k*num_vecs] *= -0.5;
// Per-vector decision loop; its body is not visible here -- presumably an
// argmax over the m_num_classes scores. TODO confirm against full source.
136 for ( i = 0 ; i < num_vecs ; ++i )
140 CMath::display_matrix(norm2.
vector, num_vecs, m_num_classes,
"norm2");
// Guard at the top of train_machine: training requires labels to be attached.
150 SG_ERROR(
"No labels allocated in QDA training\n");
// Guard: features handed to train_machine must be CDotFeatures.
// Fixed typo in the error message ("Speficied" -> "Specified"), matching the
// correctly spelled sibling message used in the apply routine.
155 SG_ERROR(
"Specified features are not of type CDotFeatures\n");
// ---- fragment: interior of CQDA::train_machine ----
// NOTE(review): excerpt is non-contiguous; embedded numbers are original file
// line numbers and several intervening statements are not visible.
159 SG_ERROR(
"No features allocated in QDA training\n");
161 if ( !train_labels.
vector )
162 SG_ERROR(
"No train_labels allocated in QDA training\n");
// Labels and feature vectors must be in one-to-one correspondence.
169 if ( num_vec != train_labels.
vlen )
170 SG_ERROR(
"Dimension mismatch between features and labels in QDA training");
// class_idxs: for each class, the indices of its training vectors
// (row-block of length num_vec per class); class_nums: per-class counts.
172 int32_t* class_idxs =
SG_MALLOC(int32_t, num_vec*m_num_classes);
174 int32_t* class_nums =
SG_MALLOC(int32_t, m_num_classes);
175 memset(class_nums, 0, m_num_classes*
sizeof(int32_t));
// Bucket every training index under its label; labels must lie in
// {0, ..., m_num_classes-1}.
178 for ( i = 0 ; i < train_labels.
vlen ; ++i )
180 class_idx = train_labels.
vector[i];
182 if ( class_idx < 0 || class_idx >= m_num_classes )
184 SG_ERROR(
"found label out of {0, 1, 2, ..., num_classes-1}...");
189 class_idxs[ class_idx*num_vec + class_nums[class_idx]++ ] = i;
// Every class needs at least one training example.
193 for ( i = 0 ; i < m_num_classes ; ++i )
195 if ( class_nums[i] <= 0 )
197 SG_ERROR(
"What? One class with no elements\n");
// Third dimension of the per-class covariance and rotation tensors.
208 cov_dims[2] = m_num_classes;
219 rot_dims[2] = m_num_classes;
// Per-class pass: gather the class's vectors (column-major into buffer,
// class_nums[k] x m_dim) while accumulating the class mean.
229 for ( k = 0 ; k < m_num_classes ; ++k )
232 for ( i = 0 ; i < class_nums[k] ; ++i )
234 vec = rf->get_feature_vector(class_idxs[k*num_vec + i], vlen, vfree);
237 for ( j = 0 ; j < vlen ; ++j )
239 m_means[k*m_dim + j] += vec[j];
240 buffer[i + j*class_nums[k]] = vec[j];
243 rf->free_feature_vector(vec, class_idxs[k*num_vec + i], vfree);
// Finish the mean (divide by the class size), then center the buffer.
246 for ( j = 0 ; j < m_dim ; ++j )
247 m_means[k*m_dim + j] /= class_nums[k];
249 for ( i = 0 ; i < class_nums[k] ; ++i )
250 for ( j = 0 ; j < m_dim ; ++j )
251 buffer[i + j*class_nums[k]] -= m_means[k*m_dim + j];
// SVD of the centered class data: jobu='N' (no left vectors),
// jobvt='A' (all right singular vectors -> the class rotation).
// Presumably a LAPACK dgesvd-style call whose head is outside this view --
// TODO confirm.
254 char jobu =
'N', jobvt =
'A';
255 int m = class_nums[k], n = m_dim;
256 int lda = m, ldu = m, ldvt = n;
262 rot_mat, ldvt, &info);
// Scale M column-wise by the class's singular-value-derived scalings ...
275 for ( i = 0 ; i < m_dim ; ++i )
276 for ( j = 0 ; j < m_dim ; ++j )
277 M[i + j*m_dim] *= scalings[k*m_dim + j];
// ... then M * rot^T (n x n) -- presumably reconstructing the class
// covariance; operand tail not visible here.
279 cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, n, n, n, 1.0,
292 M_dims[2] = m_num_classes;
// Build the per-class whitening transform m_M = rotation * diag(1/sqrt(s))
// and accumulate the log-determinant term m_slog[k] = sum_j log(s_j).
299 for ( k = 0 ; k < m_num_classes ; ++k )
301 for ( j = 0 ; j < m_dim ; ++j )
303 sinvsqrt[j] = 1.0 /
CMath::sqrt(scalings[k*m_dim + j]);
304 m_slog[k] +=
CMath::log(scalings[k*m_dim + j]);
307 for ( i = 0 ; i < m_dim ; ++i )
308 for ( j = 0 ; j < m_dim ; ++j )
310 idx = k*m_dim*m_dim + i + j*m_dim;
311 m_M[idx] = rotations[idx] * sinvsqrt[j];
// Debug output: dump the trained model components.
316 SG_PRINT(
">>> QDA machine trained with %d classes\n", m_num_classes);
318 SG_PRINT(
"\n>>> Displaying means ...\n");
319 CMath::display_matrix(m_means.
matrix, m_dim, m_num_classes);
321 SG_PRINT(
"\n>>> Displaying scalings ...\n");
322 CMath::display_matrix(scalings.
matrix, m_dim, m_num_classes);
324 SG_PRINT(
"\n>>> Displaying rotations ... \n");
325 for ( k = 0 ; k < m_num_classes ; ++k )
326 CMath::display_matrix(rotations.
get_matrix(k), m_dim, m_dim);
328 SG_PRINT(
"\n>>> Displaying sinvsqrt ... \n");
// Debug banner before dumping the per-class whitening matrices m_M.
// Fixed typo in the message ("Diplaying" -> "Displaying"), consistent with
// the other debug banners in this routine.
331 SG_PRINT(
"\n>>> Displaying m_M matrices ... \n");
// Debug dump: one m_dim x m_dim whitening matrix per class.
332 for ( k = 0 ; k < m_num_classes ; ++k )
333 CMath::display_matrix(m_M.
get_matrix(k), m_dim, m_dim);