23 CVowpalWabbit::CVowpalWabbit()
49 no_training = vw->no_training;
50 dump_interval = vw->dump_interval;
51 sum_loss_since_last_dump = 0.;
52 reg_name = vw->reg_name;
53 reg_dump_text = vw->reg_dump_text;
54 save_predictions = vw->save_predictions;
55 prediction_fd = vw->prediction_fd;
87 if (adaptive_learning)
119 reg_name = file_name;
120 reg_dump_text = is_text;
125 save_predictions =
true;
126 prediction_fd = open(file_name, O_CREAT|O_TRUNC|O_WRONLY, 0666);
127 if (prediction_fd < 0)
128 SG_SERROR(
"Unable to open prediction file %s for writing!\n", file_name)
150 const char* header_fmt =
"%-10s %-10s %8s %8s %10s %8s %8s\n";
155 "average",
"since",
"example",
"example",
156 "current",
"current",
"current");
158 "loss",
"last",
"counter",
"weight",
"label",
"predict",
"features");
171 if (example->
pass != current_pass)
174 current_pass = example->
pass;
182 output_example(example);
198 for (uint32_t i = 0; i < length; i++)
202 if (reg_name != NULL)
212 prediction = inline_l1_predict(ex);
214 prediction = inline_predict(ex);
229 update = (
env->
eta * exact_norm)/sum_abs_x;
261 dump_interval = exp(1.);
262 sum_loss_since_last_dump = 0.;
264 reg_dump_text =
true;
265 save_predictions =
false;
303 ex->
atomics[(int32_t)(i[1])], thread_mask,
341 if (ret < env->min_label)
347 void CVowpalWabbit::output_example(
VwExample* &example)
351 sum_loss_since_last_dump += example->
loss;
354 print_update(example);
359 if (save_predictions)
369 void CVowpalWabbit::print_update(
VwExample* &ex)
371 SG_SPRINT(
"%-10.6f %-10.6f %8lld %8.1f %8.4f %8.4f %8lu\n",
379 sum_loss_since_last_dump = 0.0;
389 int32_t num = sprintf(temp,
"%f", res);
391 t = write(f, temp, num);
395 if (tag.begin != tag.end)
398 t = write(f, temp, 1);
402 t = write(f, tag.begin, sizeof(
char)*tag.index());
403 if (t != (ssize_t) (sizeof(
char)*tag.index()))
408 t = write(f, temp, 1);
416 quiet=verbose==
false;
427 if (g == 0)
return 0.;
436 float32_t* w_vec = &weights[f->weight_index & thread_mask];
439 sum_abs_x += fabsf(f->x);
462 float32_t update2 = g * page_feature.
x * page_feature.
x;
465 float32_t* w_vec = &weights[(halfhash + elem->weight_index) & mask];
468 sum_abs_x += fabsf(elem->x);
uint32_t weight_index
Hashed index in weight vector.
uint32_t vw_size_t
vw_size_t typedef to work across platforms
CVwRegressor * reg
Regressor.
T get_element(int32_t index) const
Class OnlineLinearMachine is a generic interface for linear machines like classifiers which work thro...
void set_adaptive(bool adaptive_learning)
float64_t weighted_examples
Weighted examples.
T * end
Pointer to last set element in the array.
virtual void load_regressor(char *file_name)
virtual void release_example()
virtual void init(CVwEnvironment *env_to_use=NULL)
void set_prediction_out(char *file_name)
T * begin
Pointer to first element of the array.
Class CVwEnvironment is the environment used by VW.
CLossFunction * loss
Loss function.
void set_stride(vw_size_t new_stride)
vw_size_t num_features
Number of features.
void(* update)(float *foo, float bar)
float64_t min_label
Smallest label seen.
virtual VwExample * get_example()
Class v_array taken directly from JL's implementation.
float32_t one_pf_quad_predict_trunc(float32_t *weights, VwFeature &f, v_array< VwFeature > &cross_features, vw_size_t mask, float32_t gravity)
int64_t example_number
Example number.
float32_t total_sum_feat_sq
Total sum of square of features.
virtual void start_parser()
float32_t ** weight_vectors
Weight vectors, one array for each thread.
float32_t l1_regularization
Level of L1 regularization.
vw_size_t num_bits
log_2 of the number of features
int32_t get_num_elements() const
VwAdaptiveLearner uses an adaptive subgradient technique to update weights.
float64_t get_loss(float64_t prediction, float64_t label)
const int32_t quadratic_constant
Constant used while hashing/accessing quadratic features.
float32_t eta
Learning rate.
float32_t real_weight(float32_t w, float32_t gravity)
CVwEnvironment * env
Environment for VW, i.e., globals.
float64_t max_label
Largest label seen.
float32_t label
Label value.
virtual void reset_stream()
float32_t compute_exact_norm_quad(float32_t *weights, VwFeature &page_feature, v_array< VwFeature > &offer_features, vw_size_t mask, float32_t g, float32_t &sum_abs_x)
void load_regressor(char *file_name)
v_array< vw_size_t > indices
Array of namespaces.
virtual void set_learner()
float32_t update_sum
Sum of updates.
bool exact_adaptive_norm
Whether exact norm is used for adaptive learning.
virtual float32_t dense_dot_truncated(const float32_t *vec2, VwExample *&ex, float32_t gravity)
static float32_t invsqrt(float32_t x)
x^-0.5 (inverse square root), x being a float32_t
virtual CVwEnvironment * get_env()
float32_t power_t
Power of t applied while updating.
float32_t weight
Weight of example.
void push_back(T element)
VwNonAdaptiveLearner uses a standard gradient descent weight update rule.
float32_t eta_decay_rate
Decay rate of eta per pass.
float32_t compute_exact_norm(VwExample *&ex, float32_t &sum_abs_x)
DynArray< char * > pairs
Pairs of features to cross for quadratic updates.
vw_size_t num_passes
Number of passes.
float32_t final_prediction
Final prediction.
virtual void train(VwExample *&ex, float32_t update)=0
vw_size_t stride
Number of elements in weight vector per feature.
void set_exact_adaptive_norm(bool exact_adaptive)
virtual float32_t predict_and_finalize(VwExample *ex)
float32_t example_t
t value for this example
This class implements streaming features for use with VW.
void set_regressor_out(char *file_name, bool is_text=true)
virtual bool get_next_example()
float32_t initial
Initial approximation.
float32_t global_weight
Global weight.
float32_t x
Feature value.
virtual bool train_machine(CFeatures *feat=NULL)
all of classes and functions are contained in the shogun namespace
CStreamingVwFeatures * features
Features.
virtual void end_parser()
The class Features is the base class of all feature objects.
VwLabel * ld
Label object.
float32_t eta_round
Learning rate for this round.
void add_quadratic_pair(char *pair)
void reinitialize_weights()
Class CVowpalWabbit is the implementation of the online learning algorithm used in Vowpal Wabbit...
vw_size_t thread_mask
Mask used by regressor for learning.
bool adaptive
Whether adaptive learning is used.
float32_t one_pf_quad_predict(float32_t *weights, VwFeature &f, v_array< VwFeature > &cross_features, vw_size_t mask)
virtual float32_t dense_dot(VwExample *&ex, const float32_t *vec2)
vw_size_t passes_complete
Number of passes complete.
virtual void dump_regressor(char *reg_name, bool as_text)
float64_t get_update(float64_t prediction, float64_t label, float64_t eta_t, float64_t norm)
CVwLearner * learner
Learner to use.
virtual float64_t get_square_grad(float64_t prediction, float64_t label)=0
float64_t sum_loss
Sum of losses.
v_array< VwFeature > atomics[256]
Array of features.