template <class T>
SGSparseVector<T>::SGSparseVector(SGSparseVectorEntry<T>* feats, index_t num_entries, bool ref_counting)
	: SGReferencedData(ref_counting), num_feat_entries(num_entries), features(feats) { }

template <class T>
SGSparseVector<T>::SGSparseVector(index_t num_entries, bool ref_counting)
	: SGReferencedData(ref_counting), num_feat_entries(num_entries)
{ features = SG_MALLOC(SGSparseVectorEntry<T>, num_feat_entries); }
template <class T>
T SGSparseVector<T>::dense_dot(T alpha, T* vec, int32_t dim, T b)
{
	T result = b;
	for (int32_t i = 0; i < num_feat_entries; i++)
		if (features[i].feat_index < dim)
			result += alpha * vec[features[i].feat_index] * features[i].entry;
	return result;
}
template <class T>
void SGSparseVector<T>::add_to_dense(T alpha, T* vec, int32_t dim, bool abs_val)
{
	REQUIRE(vec, "vec must not be NULL\n");

	if (abs_val)
	{
		for (int32_t i = 0; i < num_feat_entries; i++)
			vec[features[i].feat_index] += alpha * CMath::abs(features[i].entry);
	}
	else
	{
		for (int32_t i = 0; i < num_feat_entries; i++)
			vec[features[i].feat_index] += alpha * features[i].entry;
	}
}
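// Usage sketch (not part of the original source): a minimal example of add_to_dense()
// and the raw-array dense_dot() above. The headers and the init/exit calls are the
// standard Shogun boilerplate, assumed here rather than taken from this file.
#include <shogun/base/init.h>
#include <shogun/lib/SGSparseVector.h>

static void example_add_to_dense_sketch()
{
	using namespace shogun;
	init_shogun_with_defaults();

	SGSparseVector<float64_t> v(2);                 // room for two non-zero entries
	v.features[0].feat_index = 1; v.features[0].entry =  3.0;
	v.features[1].feat_index = 4; v.features[1].entry = -2.0;

	float64_t dense[6] = {1, 1, 1, 1, 1, 1};
	v.add_to_dense(2.0, dense, 6);                  // dense[1] -> 7.0, dense[4] -> -3.0
	float64_t r = v.dense_dot(1.0, dense, 6, 0.5);  // 0.5 + 3*7 + (-2)*(-3) = 27.5
	(void)r;

	exit_shogun();
}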
template <class T>
template <typename ST>
T SGSparseVector<T>::dense_dot(SGVector<ST> vec)
{
	T result = 0;
	for (int32_t i = 0; i < num_feat_entries; i++)
	{
		if (features[i].feat_index < vec.vlen)
			result += static_cast<T>(vec[features[i].feat_index]) * features[i].entry;
	}
	return result;
}
template <class T>
T SGSparseVector<T>::sparse_dot(const SGSparseVector<T>& v)
{
	return sparse_dot(*this, v);
}

// In the static sparse_dot(a, b), unsorted operands fall back to the quadratic routine:
		return dot_prod_expensive_unsorted(a, b);
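// Usage sketch (not part of the original source): dotting two sparse vectors through
// the member sparse_dot(). Entries are written in increasing index order, so the sorted
// path is used rather than the O(n^2) unsorted fallback. Headers and setup as in the
// earlier sketch.
static void example_sparse_dot_sketch()
{
	using namespace shogun;

	SGSparseVector<float64_t> a(2), b(2);
	a.features[0].feat_index = 0; a.features[0].entry =  2.0;
	a.features[1].feat_index = 3; a.features[1].entry =  1.0;
	b.features[0].feat_index = 3; b.features[0].entry = -4.0;
	b.features[1].feat_index = 7; b.features[1].entry =  5.0;

	float64_t d = a.sparse_dot(b);   // only index 3 overlaps: 1.0 * (-4.0) = -4.0
	(void)d;
}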
template <class T>
int32_t SGSparseVector<T>::get_num_dimensions()
{
	int32_t dimensions = -1;
	for (index_t i = 0; i < num_feat_entries; i++)
	{
		if (features[i].feat_index > dimensions)
			dimensions = features[i].feat_index;
	}
	return dimensions + 1;
}
template <class T>
void SGSparseVector<T>::sort_features(bool stable_pointer)
{
	if (!num_feat_entries)
		return;

	// remember the old pointer so the stable_pointer guarantee can be checked at the end
	SGSparseVectorEntry<T>* old_features_ptr = features;

	// sort the entries by feature index
	int32_t* feat_idx = SG_MALLOC(int32_t, num_feat_entries);
	for (index_t j = 0; j < num_feat_entries; j++)
		feat_idx[j] = features[j].feat_index;
	CMath::qsort_index(feat_idx, features, num_feat_entries);
	SG_FREE(feat_idx);

	for (index_t j = 1; j < num_feat_entries; j++)
	{
		REQUIRE(features[j - 1].feat_index <= features[j].feat_index,
			"sort_features(): failed sanity check %d <= %d after sorting (comparing indices features[%d] <= features[%d], features=%d)\n",
			features[j - 1].feat_index, features[j].feat_index, j - 1, j, num_feat_entries);
	}

	// compact in place: merge entries that share an index and drop zero entries
	int32_t last_index = 0;
	for (index_t j = 1; j < num_feat_entries; j++)
	{
		REQUIRE(last_index < j,
			"sort_features(): target index %d must not exceed source index j=%d",
			last_index, j);
		REQUIRE(features[last_index].feat_index <= features[j].feat_index,
			"sort_features(): failed sanity check %d = features[%d].feat_index <= features[%d].feat_index = %d\n",
			features[last_index].feat_index, last_index, j, features[j].feat_index);

		// merge duplicate indices by summing their entries
		if (features[last_index].feat_index == features[j].feat_index)
		{
			features[last_index].entry += features[j].entry;
			continue;
		}
		// advance only past non-zero entries, so zeros are overwritten
		if (features[last_index].entry != 0.0)
			last_index++;
		features[last_index] = features[j];
	}

	// a remaining zero entry not caught by the loop above
	if (features[last_index].entry == 0.0)
		last_index--;

	int32_t new_feat_count = last_index + 1;
	ASSERT(new_feat_count <= num_feat_entries);

	if (!stable_pointer)
	{
		SG_SINFO("shrinking vector from %d to %d\n", num_feat_entries, new_feat_count);
		features = SG_REALLOC(SGSparseVectorEntry<T>, features, num_feat_entries, new_feat_count);
	}
	num_feat_entries = new_feat_count;

	for (index_t j = 1; j < num_feat_entries; j++)
	{
		REQUIRE(features[j - 1].feat_index < features[j].feat_index,
			"sort_features(): failed sanity check %d < %d after sorting (comparing indices features[%d] < features[%d], features=%d)\n",
			features[j - 1].feat_index, features[j].feat_index, j - 1, j, num_feat_entries);
	}

	if (stable_pointer)
		ASSERT(old_features_ptr == features);
}
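// Usage sketch (not part of the original source): sort_features() orders entries by
// feat_index, merges duplicate indices and drops zero entries, so the fast sorted dot
// product can be used afterwards. Headers and setup as in the earlier sketches.
static void example_sort_features_sketch()
{
	using namespace shogun;

	SGSparseVector<float64_t> v(4);
	v.features[0].feat_index = 5; v.features[0].entry =  1.0;
	v.features[1].feat_index = 2; v.features[1].entry =  3.0;
	v.features[2].feat_index = 5; v.features[2].entry = -1.0;  // duplicate of index 5
	v.features[3].feat_index = 9; v.features[3].entry =  0.0;  // explicit zero entry

	v.sort_features();
	// afterwards: num_feat_entries == 1 and the single remaining entry is (2, 3.0);
	// the index-5 duplicates summed to zero and index 9 was zero, so both were dropped.
}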
template <class T>
bool SGSparseVector<T>::is_sorted() const
{
	if (num_feat_entries == 0 || num_feat_entries == 1)
		return true;
	for (index_t j = 1; j < num_feat_entries; j++)
		if (features[j - 1].feat_index >= features[j].feat_index)
			return false;
	return true;
}
template <class T>
T SGSparseVector<T>::get_feature(int32_t index)
{
	T ret = 0;
	for (index_t i = 0; i < num_feat_entries; i++)
		if (features[i].feat_index == index)
			ret += features[i].entry;
	return ret;
}
template <class T>
SGVector<T> SGSparseVector<T>::get_dense()
{
	SGVector<T> dense(get_num_dimensions());
	dense.zero();
	for (index_t i = 0; i < num_feat_entries; i++)
		dense.vector[features[i].feat_index] += features[i].entry;
	return dense;
}
template <class T>
SGVector<T> SGSparseVector<T>::get_dense(int32_t dimension)
{
	SGVector<T> dense(dimension);
	dense.zero();

	REQUIRE(get_num_dimensions() <= dimension,
		"get_dense(dimension=%d): sparse dimension %d exceeds requested dimension\n",
		dimension, get_num_dimensions());

	for (index_t i = 0; i < num_feat_entries; i++)
		dense.vector[features[i].feat_index] += features[i].entry;
	return dense;
}
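// Usage sketch (not part of the original source): expanding a sparse vector into dense
// storage and dotting it back against the sparse one through the templated dense_dot()
// overload above. Assumes <shogun/lib/SGVector.h> in addition to the earlier headers.
static void example_get_dense_sketch()
{
	using namespace shogun;

	SGSparseVector<float64_t> v(2);
	v.features[0].feat_index = 0; v.features[0].entry = 1.0;
	v.features[1].feat_index = 3; v.features[1].entry = 2.0;

	SGVector<float64_t> dense = v.get_dense(5);   // [1, 0, 0, 2, 0]
	float64_t d = v.dense_dot(dense);             // 1*1 + 2*2 = 5
	(void)d;
}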
template <>
void SGSparseVector<complex128_t>::load(CFile* loader)
{ SG_SERROR("SGSparseVector::load():: Not supported for complex128_t\n"); }

template <>
void SGSparseVector<complex128_t>::save(CFile* saver)
{ SG_SERROR("SGSparseVector::save():: Not supported for complex128_t\n"); }
// SGReferencedData hooks: init_data() resets the vector, free_data() also releases the entries.
template <class T> void SGSparseVector<T>::init_data() { num_feat_entries = 0; features = NULL; }
template <class T> void SGSparseVector<T>::free_data() { num_feat_entries = 0; SG_FREE(features); }
template <class T>
T SGSparseVector<T>::dot_prod_expensive_unsorted(const SGSparseVector<T>& a, const SGSparseVector<T>& b)
{
	SG_SWARNING("Computing sparse_dot(a,b) on unsorted vectors is very expensive: O(n^2)\n");
	SG_SWARNING("Using fallback to give correct results because upstream code does not sort.\n");

	T dot_prod = 0;
	for (index_t b_idx = 0; b_idx < b.num_feat_entries; ++b_idx)
	{
		const T tmp = b.features[b_idx].entry;
		for (index_t a_idx = 0; a_idx < a.num_feat_entries; ++a_idx)
		{
			if (a.features[a_idx].feat_index == b.features[b_idx].feat_index)
				dot_prod += tmp * a.features[a_idx].entry;
		}
	}
	return dot_prod;
}
// display_vector() specializations: each prints one "index:value" pair per stored entry,
// using a format string that matches the entry type.

// bool
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%d", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry ? 1 : 0);

// char
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%c", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// int8_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%d", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// uint8_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%u", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// int16_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%d", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// uint16_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%u", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// int32_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%d", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// uint32_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%u", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// int64_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%lld", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// uint64_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%llu ", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// float32_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%g", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// float64_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%.18g", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// floatmax_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:%.36Lg", prefix, i == 0 ? "" : " ", features[i].feat_index, features[i].entry);

// complex128_t
for (int32_t i = 0; i < num_feat_entries; i++)
	SG_SPRINT("%s%s%d:(%.18lg+i%.18lg)", prefix, i == 0 ? "" : " ", features[i].feat_index,
		features[i].entry.real(), features[i].entry.imag());
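// Usage sketch (not part of the original source): the loops above are reached through
// display_vector(name, prefix); the float64_t case formats entries with %.18g. Headers
// and setup as in the earlier sketches.
static void example_display_vector_sketch()
{
	using namespace shogun;

	SGSparseVector<float64_t> v(1);
	v.features[0].feat_index = 7; v.features[0].entry = 0.25;
	v.display_vector("v");   // prints the single stored entry as 7:0.25
}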