#ifndef _HASHEDDOCDOTFEATURES__H__
#define _HASHEDDOCDOTFEATURES__H__
template <class ST> class CStringFeatures;
template <class ST> class SGMatrix;
class CHashedDocConverter;
CTokenizer* tzer=NULL, bool normalize=true, int32_t n_grams=1, int32_t skips=0, int32_t size=0);
virtual const char* get_name() const;
bool normalize, int32_t n_grams, int32_t skips);
virtual bool get_next_feature(int32_t &index, float64_t &value, void *iterator)
virtual void free_feature_iterator(void *iterator)
virtual EFeatureClass get_feature_class() const
virtual int32_t get_dim_feature_space() const
virtual float64_t dense_dot(int32_t vec_idx1, const float64_t *vec2, int32_t vec2_len)
virtual void * get_feature_iterator(int32_t vector_index)
virtual const char * get_name() const
Features that support dot products among other operations.
EFeatureClass
shogun feature class
virtual float64_t dense_dot_sgvec(int32_t vec_idx1, const SGVector< float64_t > vec2)
virtual void add_to_dense_vec(float64_t alpha, int32_t vec_idx1, float64_t *vec2, int32_t vec2_len, bool abs_val=false)
CStringFeatures< char > * doc_collection
virtual ~CHashedDocDotFeatures()
The class CTokenizer acts as a base class in order to implement tokenizers. Sub-classes must implement the required tokenization methods.
A File access base class.
virtual CFeatures * duplicate() const
virtual float64_t dot(int32_t vec_idx1, CDotFeatures *df, int32_t vec_idx2)
static uint32_t calculate_token_hash(char *token, int32_t length, int32_t num_bits, uint32_t seed)
void set_doc_collection(CStringFeatures< char > *docs)
virtual int32_t get_num_vectors() const
EFeatureType
shogun feature type
CHashedDocDotFeatures(int32_t hash_bits=0, CStringFeatures< char > *docs=NULL, CTokenizer *tzer=NULL, bool normalize=true, int32_t n_grams=1, int32_t skips=0, int32_t size=0)
All classes and functions are contained in the shogun namespace.
The class Features is the base class of all feature objects.
virtual EFeatureType get_feature_type() const
This class can be used to provide on-the-fly vectorization of a document collection. Like in the standard Bag-of-Words representation, this class considers each document as a collection of tokens, which are then hashed into a new feature space of a specified dimension. This class is very flexible and allows the user to specify the tokenizer used to tokenize each document, specify whether the results should be normalized with regard to the sqrt of the document size, as well as to specify whether they want to combine different tokens. The latter implements a k-skip n-grams approach, meaning that you can combine up to n tokens, while skipping up to k. E.g. for the tokens ["a", "b", "c", "d"], with n_grams = 2 and skips = 2, one would get the following combinations: ["a", "ab", "ac" (skipped 1), "ad" (skipped 2), "b", "bc", "bd" (skipped 1), "c", "cd", "d"].
virtual int32_t get_nnz_features_for_vector(int32_t num)