Go to the documentation of this file.
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016 #include <shogun/classifier/vw/learners/VwNonAdaptiveLearner.h>
00017
00018 using namespace shogun;
00019
00020 CVwNonAdaptiveLearner::CVwNonAdaptiveLearner()
00021 : CVwLearner()
00022 {
00023 }
00024
00025 CVwNonAdaptiveLearner::CVwNonAdaptiveLearner(CVwRegressor* regressor, CVwEnvironment* vw_env)
00026 : CVwLearner(regressor, vw_env)
00027 {
00028 }
00029
00030 CVwNonAdaptiveLearner::~CVwNonAdaptiveLearner()
00031 {
00032 }
00033
00034 void CVwNonAdaptiveLearner::train(VwExample* &ex, float32_t update)
00035 {
00036 if (fabs(update) == 0.)
00037 return;
00038 vw_size_t thread_mask = env->thread_mask;
00039
00040 vw_size_t thread_num = 0;
00041 float32_t* weights = reg->weight_vectors[thread_num];
00042
00043 for (vw_size_t* i = ex->indices.begin; i != ex->indices.end; i++)
00044 {
00045 for (VwFeature* f = ex->atomics[*i].begin; f != ex->atomics[*i].end; f++)
00046 weights[f->weight_index & thread_mask] += update * f->x;
00047 }
00048
00049 for (int32_t k = 0; k < env->pairs.get_num_elements(); k++)
00050 {
00051 char* i = env->pairs.get_element(k);
00052
00053 v_array<VwFeature> temp = ex->atomics[(int32_t)(i[0])];
00054 temp.begin = ex->atomics[(int32_t)(i[0])].begin;
00055 temp.end = ex->atomics[(int32_t)(i[0])].end;
00056 for (; temp.begin != temp.end; temp.begin++)
00057 quad_update(weights, *temp.begin, ex->atomics[(int32_t)(i[1])], thread_mask, update);
00058 }
00059 }
00060
00061 void CVwNonAdaptiveLearner::quad_update(float32_t* weights, VwFeature& page_feature, v_array<VwFeature> &offer_features, vw_size_t mask, float32_t update)
00062 {
00063 vw_size_t halfhash = quadratic_constant * page_feature.weight_index;
00064 update *= page_feature.x;
00065 for (VwFeature* elem = offer_features.begin; elem != offer_features.end; elem++)
00066 weights[(halfhash + elem->weight_index) & mask] += update * elem->x;
00067 }