// NOTE(review): this file is a decimated extraction of a C++ template
// implementation (it reads like shogun's SGSparseVector<T>); only scattered
// lines survive and the original file's line numbers ("18", "25", ...) are
// fused onto each line. Nothing below compiles as-is -- recover the full
// file before making code changes.
// The two lines here look like constructor member-initializer lists:
// one taking (feats, num_entries), one taking only num_entries -- TODO confirm.
18 num_feat_entries(num_entries), features(feats)
25 num_feat_entries(num_entries)
// Fragment of a sparse-times-dense accumulation loop: for every stored
// (feat_index, entry) pair whose index fits inside the dense vector's
// dimension `dim`, adds alpha * vec[feat_index] * entry into `result`.
// Presumably the body of a dense_dot(alpha, vec, dim, ...) method -- the
// signature and surrounding lines were lost in extraction.
51 for (int32_t i=0; i<num_feat_entries; i++)
53 if (features[i].feat_index<dim)
54 result+=alpha*vec[features[i].feat_index]*features[i].entry;
// Fragment of a templated overload (template <typename ST>) of the same
// sparse-times-dense loop; bounds-checks against vec.vlen and static_casts
// the dense element to T before accumulating. Several statements are split
// mid-expression by the extraction (e.g. "vec." / "vlen)"), so no comments
// are inserted inside them.
62 template <
typename ST>
70 for (int32_t i=0; i<num_feat_entries; i++)
72 if (features[i].feat_index<vec.
vlen)
73 result+=
static_cast<T
>(vec[features[i].feat_index])
// Fragments of the dot-product entry points: a member dot(v) forwarding to a
// static sparse_dot(*this, v), followed by three return statements that
// presumably dispatch to a symmetric strategy (same vector on both sides?)
// or an asymmetric one with the operands in either order. The conditions
// guarding each branch were lost in extraction -- TODO confirm against the
// original file.
88 return sparse_dot(*
this, v);
101 return dot_prod_symmetric(a, b);
105 return dot_prod_asymmetric(a, b);
109 return dot_prod_asymmetric(b, a);
// Fragment scanning all entries for the largest feat_index; `dimensions`
// starts at -1 so an empty vector leaves it at -1. Presumably the body of
// get_num_dimensions() (the surrounding return -- likely dimensions+1 --
// is not visible here; TODO confirm).
119 int32_t dimensions = -1;
120 for (
index_t i=0; i<num_feat_entries; i++)
122 if (features[i].feat_index > dimensions)
124 dimensions = features[i].feat_index;
// Fragments of sort_features(bool stable_pointer): the visible pieces show
//   1) early-out on an empty vector;
//   2) building a feat_idx scratch array of the indices (SG_MALLOC), which
//      presumably feeds a sort whose call site was lost in extraction;
//   3) a post-sort sanity pass REQUIREing non-decreasing feat_index;
//   4) an in-place compaction pass (last_index) that merges duplicate
//      indices by summing their entries and drops entries that become 0.0;
//   5) shrinking num_feat_entries to the compacted count (new_feat_count),
//      with an SG_SINFO trace of the shrink;
//   6) a final sanity pass REQUIREing strictly increasing feat_index
//      (duplicates gone);
//   7) when stable_pointer is set, an ASSERT that the features pointer was
//      not reallocated.
// The REQUIRE at original line 162 has its condition and argument lines
// missing here -- only its message string survives.
134 if (!num_feat_entries)
140 int32_t* feat_idx=SG_MALLOC(int32_t, num_feat_entries);
141 for (
index_t j=0; j<num_feat_entries; j++)
143 feat_idx[j]=features[j].feat_index;
149 for (
index_t j=1; j<num_feat_entries; j++)
151 REQUIRE(features[j-1].feat_index <= features[j].feat_index,
152 "sort_features(): failed sanity check %d <= %d after sorting (comparing indices features[%d] <= features[%d], features=%d)\n",
153 features[j-1].feat_index, features[j].feat_index, j-1, j, num_feat_entries);
157 int32_t last_index = 0;
158 for (
index_t j=1; j<num_feat_entries; j++)
162 "sort_features(): target index %d must not exceed source index j=%d",
164 REQUIRE(features[last_index].feat_index <= features[j].feat_index,
165 "sort_features(): failed sanity check %d = features[%d].feat_index <= features[%d].feat_index = %d\n",
166 features[last_index].feat_index, last_index, j, features[j].feat_index);
169 if (features[last_index].feat_index == features[j].feat_index)
171 features[last_index].entry += features[j].entry;
176 if (features[last_index].entry != 0.0)
181 features[last_index] = features[j];
185 if (features[last_index].entry == 0.0)
190 int32_t new_feat_count = last_index+1;
191 ASSERT(new_feat_count <= num_feat_entries);
196 SG_SINFO(
"shrinking vector from %d to %d\n", num_feat_entries, new_feat_count);
199 num_feat_entries = new_feat_count;
201 for (
index_t j=1; j<num_feat_entries; j++)
203 REQUIRE(features[j-1].feat_index < features[j].feat_index,
204 "sort_features(): failed sanity check %d < %d after sorting (comparing indices features[%d] < features[%d], features=%d)\n",
205 features[j-1].feat_index, features[j].feat_index, j-1, j, num_feat_entries);
209 if (stable_pointer) {
210 ASSERT(old_features_ptr == features);
// Fragment of a lookup by dense index: sums the entries of every stored
// pair whose feat_index equals `index` into `ret` (summing, not returning
// the first hit, so it tolerates unsorted/duplicated entries). Presumably
// the body of get_feature(index).
221 for (
index_t i=0; i<num_feat_entries; i++)
222 if (features[i].feat_index==index)
223 ret+=features[i].entry ;
// Fragments of two densification routines. Both scatter-add every stored
// entry into dense.vector at its feat_index (accumulating, so duplicate
// indices sum). The second variant additionally REQUIREs that the sparse
// vector's dimensionality fits inside the caller-requested `dimension`
// before scattering. The allocation/zeroing of `dense` and the returns are
// not visible here.
239 for (
index_t i=0; i<num_feat_entries; i++)
241 dense.
vector[features[i].feat_index] += features[i].entry;
256 REQUIRE(get_num_dimensions() <= dimension,
"get_dense(dimension=%d): sparse dimension %d exceeds requested dimension\n",
257 dimension, get_num_dimensions());
259 for (
index_t i=0; i<num_feat_entries; i++)
261 dense.
vector[features[i].feat_index] += features[i].entry;
// Fragments of the complex128_t template specializations: load() and save()
// both abort with SG_SERROR since serialization is not supported for
// complex vectors. The two trailing `num_feat_entries = 0;` lines
// presumably reset the vector to empty in those same (or nearby) error
// paths -- their enclosing functions are not visible; TODO confirm.
298 SG_SERROR(
"SGSparseVector::load():: Not supported for complex128_t\n");
304 SG_SERROR(
"SGSparseVector::save():: Not supported for complex128_t\n");
317 num_feat_entries = 0;
324 num_feat_entries = 0;
// Fragment of a sparse-sparse dot-product inner loop: caches b's entry at
// b_idx in `tmp`, then (after matching logic lost in extraction) adds
// tmp * a.features[a_idx].entry into dot_prod. Likely part of the
// asymmetric/expensive dot-product strategy referenced above -- TODO
// confirm which.
347 const T tmp = b.
features[b_idx].entry;
353 dot_prod += tmp * a.
features[a_idx].entry;
// Fragments of the per-type display_vector() template specializations.
// Each loops over the stored entries and SG_SPRINTs "index:value" pairs,
// prefixed with `prefix` and separated by single spaces (the i==0 ternary
// suppresses the separator before the first pair). Only the format
// specifier varies per element type:
//   %d with (entry ? 1 : 0)  -- bool
//   %c                       -- char
//   %d / %u                  -- 8/16/32-bit signed / unsigned integers
//   %lld / %llu              -- int64_t / uint64_t
//   %g / %.18g / %.36Lg      -- float32 / float64 / floatmax (long double)
//   (%.18lg+i%.18lg)         -- complex128_t, printed as real and imag parts
// The surrounding specialization signatures were lost in extraction.
398 for (int32_t i=0; i<num_feat_entries; i++)
399 SG_SPRINT(
"%s%s%d:%d", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry ? 1 : 0);
407 for (int32_t i=0; i<num_feat_entries; i++)
408 SG_SPRINT(
"%s%s%d:%c", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
416 for (int32_t i=0; i<num_feat_entries; i++)
417 SG_SPRINT(
"%s%s%d:%d", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
425 for (int32_t i=0; i<num_feat_entries; i++)
426 SG_SPRINT(
"%s%s%d:%u", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
434 for (int32_t i=0; i<num_feat_entries; i++)
435 SG_SPRINT(
"%s%s%d:%d", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
443 for (int32_t i=0; i<num_feat_entries; i++)
444 SG_SPRINT(
"%s%s%d:%u", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
452 for (int32_t i=0; i<num_feat_entries; i++)
453 SG_SPRINT(
"%s%s%d:%d", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
461 for (int32_t i=0; i<num_feat_entries; i++)
462 SG_SPRINT(
"%s%s%d:%u", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
470 for (int32_t i=0; i<num_feat_entries; i++)
471 SG_SPRINT(
"%s%s%d:%lld", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
479 for (int32_t i=0; i<num_feat_entries; i++)
480 SG_SPRINT(
"%s%s%d:%llu ", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
488 for (int32_t i=0; i<num_feat_entries; i++)
489 SG_SPRINT(
"%s%s%d:%g", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
497 for (int32_t i=0; i<num_feat_entries; i++)
498 SG_SPRINT(
"%s%s%d:%.18g", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
506 for (int32_t i=0; i<num_feat_entries; i++)
507 SG_SPRINT(
"%s%s%d:%.36Lg", prefix, i==0 ?
"" :
" ", features[i].feat_index, features[i].entry);
515 for (int32_t i=0; i<num_feat_entries; i++)
516 SG_SPRINT(
"%s%s%d:(%.18lg+i%.18lg)", prefix, i==0 ?
"" :
" ", features[i].feat_index,
517 features[i].entry.real(), features[i].entry.imag());