// Excerpt from CSGDQN::compute_ratio(): per-dimension update of the diagonal
// scaling estimate B; diffw is the change in the i-th weight, W_1[i] - W[i].
for (int32_t i=0; i<dim; i++)
    B[i] += diffw / (lambda*diffw + loss_val*dst[i]);
// Excerpt from CSGDQN::combine_and_clip(): blend the running scaling Bc with the
// fresh estimate B using weights c1 and c2; the result is then clipped to [v1, v2].
for (int32_t i=0; i<dim; i++)
    Bc[i] = Bc[i]*c1 + B[i]*c2;
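The two excerpts above only show the update and combination lines themselves. The following is a minimal, self-contained sketch of what the pair of helpers does as a whole, written against the signatures listed in the cross-references further below; the zero-denominator fallback to 1/lambda and the skipping of untouched dimensions are assumptions about the surrounding code, not lines taken from the listing.

#include <algorithm>

// Sketch only, not Shogun's implementation. B accumulates a secant-style
// curvature estimate per dimension; Bc keeps a smoothed, clipped copy of it.
void compute_ratio_sketch(const double* W, const double* W_1, double* B,
                          const double* dst, int dim, double lambda, double loss)
{
    for (int i = 0; i < dim; i++)
    {
        double diffw = W_1[i] - W[i];
        // Assumption: fall back to 1/lambda when the weight did not move.
        B[i] += (diffw != 0.0) ? diffw / (lambda*diffw + loss*dst[i]) : 1.0/lambda;
    }
}

void combine_and_clip_sketch(double* Bc, const double* B, int dim,
                             double c1, double c2, double v1, double v2)
{
    for (int i = 0; i < dim; i++)
        if (B[i] != 0.0)  // assumption: untouched dimensions are skipped
            Bc[i] = std::min(std::max(Bc[i]*c1 + B[i]*c2, v1), v2);
}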
// Excerpts from CSGDQN::train(): input validation and initialization of the
// learning-rate schedule before the main loop.
SG_ERROR("Specified features are not of type CDotFeatures\n")
ASSERT(num_vec==num_train_labels)
t = 1 / (eta0 * lambda);
SG_INFO("lambda=%f, epochs=%d, eta0=%f\n", lambda, epochs, eta0)
SG_INFO("Training on %d vectors\n", num_vec)
// Excerpts from the training loop in CSGDQN::train(). The same margin test appears
// twice, once gating the update of the scaling estimate and once gating the gradient
// step on w: the hinge loss only needs a step when the margin is violated (z < 1),
// whereas the log loss has a nonzero gradient everywhere.
bool is_log_loss = false;
for (int32_t i=0; i<num_vec; i++)
if (z < 1 || is_log_loss)   // gate for the scaling update
if (z < 1 || is_log_loss)   // gate for the gradient step on w
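As a rough illustration of what happens inside those two branches, here is a self-contained sketch of one SGD-QN style step: a loss-driven update of w scaled per coordinate by Bc, plus a weight-decay step applied only every `skip` examples. The variable names, the step size eta, and the amortized-regularization pattern are assumptions modeled on the SGD-QN algorithm, not code copied from the listing.

#include <vector>

// Sketch of one training step; dloss_z is the loss derivative at z = y*<w,x>.
void sgdqn_step_sketch(std::vector<double>& w, const std::vector<double>& x,
                       const std::vector<double>& Bc, double y,
                       double eta, double lambda, double dloss_z,
                       long& count, long skip)
{
    // Amortized regularization: shrink w once every `skip` examples instead of
    // paying for the full weight-decay term at every step (assumption).
    if (--count <= 0)
    {
        for (size_t i = 0; i < w.size(); i++)
            w[i] -= skip * lambda * eta * Bc[i] * w[i];
        count = skip;
    }

    // Loss-driven update, scaled per coordinate by Bc (the quasi-Newton part).
    if (dloss_z != 0.0)
        for (size_t i = 0; i < w.size(); i++)
            w[i] += eta * (-dloss_z) * y * Bc[i] * x[i];
}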
// Excerpts from the sparsity-calibration step: r accumulates the number of non-zero
// features over the n vectors seen, and `skip` (the interval between amortized
// weight-decay steps) is derived from the estimated density.
SG_INFO("Estimating sparsity num_vec=%d num_feat=%d.\n", num_vec, c_dim)
for (int32_t j=0; j<num_vec; j++, n++)
skip = (int32_t) ((16 * n * c_dim) / r);
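Since r is roughly n times the average number of non-zeros per vector, the formula reduces to skip being about 16 * c_dim / avg_nnz: the denser the data, the smaller skip and the more often the decay step runs. A quick check with made-up numbers:

#include <cstdio>

int main()
{
    int num_vec    = 10000;  // n, hypothetical number of training vectors
    int c_dim      = 1000;   // hypothetical feature-space dimension
    double avg_nnz = 50.0;   // hypothetical average non-zeros per vector

    double r = num_vec * avg_nnz;                    // total non-zero count
    int skip = (int)((16.0 * num_vec * c_dim) / r);  // formula from the excerpt
    std::printf("skip = %d, 16*c_dim/avg_nnz = %.0f\n", skip, 16.0 * c_dim / avg_nnz);
    return 0;
}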
Symbols cross-referenced by the listing above.

Referenced classes and types:
shogun (namespace): all classes and functions are contained in the shogun namespace.
CFeatures: the base class of all feature objects.
CDotFeatures: features that support dot products among other operations.
CLabels: models labels, i.e. class assignments of objects.
CBinaryLabels: binary labels for binary classification.
CLossFunction: the base class of all loss functions.
CHingeLoss: implements the hinge loss function.
CLinearMachine: a generic interface for all kinds of linear machines like classifiers.
ELossType: shogun loss type.

Referenced member declarations:
virtual int32_t get_num_vectors() const =0
bool has_property(EFeatureProperty p) const
virtual int32_t get_dim_feature_space() const =0
virtual float64_t dense_dot(int32_t vec_idx1, const float64_t *vec2, int32_t vec2_len)=0
virtual int32_t get_nnz_features_for_vector(int32_t num)=0
SGVector< float64_t > get_computed_dot_feature_vector(int32_t num)
virtual ELabelType get_label_type() const =0
virtual int32_t get_num_labels() const =0
virtual float64_t first_derivative(float64_t prediction, float64_t label)
virtual ELossType get_loss_type()=0
virtual bool train(CFeatures *data=NULL)
virtual void set_features(CDotFeatures *feat)
virtual void set_labels(CLabels *lab)
void set_loss_function(CLossFunction *loss_func)
void compute_ratio(float64_t *W, float64_t *W_1, float64_t *B, float64_t *dst, int32_t dim, float64_t regularizer_lambda, float64_t loss)
void combine_and_clip(float64_t *Bc, float64_t *B, int32_t dim, float64_t c1, float64_t c2, float64_t v1, float64_t v2)
static void fill_vector(T *vec, int32_t len, T value)
static void vector_multiply(T *target, const T *v1, const T *v2, int32_t len)   // compute vector multiplication
SGVector< T > clone() const
void add(const SGVector< T > x)
void add(bool *param, const char *name, const char *description="")
static void clear_cancel()
static bool cancel_computations()
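To show how the referenced pieces fit together, here is a minimal usage sketch against the C-prefixed Shogun C++ API. The header paths, the CSGDQN constructor taking (C, features, labels), set_epochs(), and apply_binary() are assumptions about a particular library version and are not taken from the listing; set_loss_function() and train() are the members listed above.

#include <shogun/base/init.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/loss/HingeLoss.h>
#include <shogun/classifier/svm/SGDQN.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // Toy data: columns are examples, two positives near (1,1), two negatives near (-1,-1).
    SGMatrix<float64_t> X(2, 4);
    X(0,0)=1;  X(1,0)=1;
    X(0,1)=2;  X(1,1)=1;
    X(0,2)=-1; X(1,2)=-1;
    X(0,3)=-2; X(1,3)=-1;

    SGVector<float64_t> y(4);
    y[0]=1; y[1]=1; y[2]=-1; y[3]=-1;

    CDenseFeatures<float64_t>* features = new CDenseFeatures<float64_t>(X);
    CBinaryLabels* labels = new CBinaryLabels(y);

    // Assumed constructor: regularization constant C, training features and labels.
    CSGDQN* machine = new CSGDQN(1.0, features, labels);
    machine->set_loss_function(new CHingeLoss());  // hinge loss, as in the cross-references
    machine->set_epochs(5);                        // assumed setter for the epoch count
    machine->train();

    CBinaryLabels* predictions = machine->apply_binary(features);  // assumed apply call

    SG_UNREF(predictions);
    SG_UNREF(machine);
    exit_shogun();
    return 0;
}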