#include <shogun/classifier/svm/OnlineLibLinear.h>

using namespace shogun;

COnlineLibLinear::COnlineLibLinear()
    : COnlineLinearMachine()
{
    init();
}

COnlineLibLinear::COnlineLibLinear(float64_t C)
{
    init();
    C1 = C;
    C2 = C;
    use_bias = true;
}

COnlineLibLinear::COnlineLibLinear(
    float64_t C, CStreamingDotFeatures* traindat)
{
    init();
    C1 = C;
    C2 = C;
    use_bias = true;

    set_features(traindat);
}

void COnlineLibLinear::init()
{
    C1 = 1;
    C2 = 1;
    use_bias = false;

    m_parameters->add(&C1, "C1", "C Cost constant 1.");
    m_parameters->add(&C2, "C2", "C Cost constant 2.");
    m_parameters->add(&use_bias, "use_bias", "Indicates if bias is used.");
}

COnlineLibLinear::~COnlineLibLinear()
{
}

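/* Train with a single pass of LIBLINEAR-style dual coordinate descent over
 * the streamed examples: each incoming example contributes one dual variable
 * alpha, which is optimized once and then discarded, while its effect is
 * accumulated into the primal weight vector w (and the bias, if enabled). */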
bool COnlineLibLinear::train(CFeatures* data)
{
    if (data)
    {
        if (!data->has_property(FP_STREAMING_DOT))
            SG_ERROR("Specified features are not of type CStreamingDotFeatures\n");
        set_features((CStreamingDotFeatures*) data);
    }

    float64_t C, d, G;
    float64_t QD;

    // Label and dual variable of the example currently being processed
    int32_t y_current;
    float64_t alpha_current;

    // Cost constants for the positive and the negative class
    float64_t Cp = C1;
    float64_t Cn = C2;

    // PG: projected gradient, used to decide whether an example can be skipped
    float64_t PG;
    float64_t PGmax_old = CMath::INFTY;
    float64_t PGmin_old = -CMath::INFTY;
    float64_t PGmax_new = -CMath::INFTY;
    float64_t PGmin_new = CMath::INFTY;

    // Diagonal shift of Q and upper bound of the dual variables, indexed by
    // y+1; these values correspond to the L1-loss SVM dual (no shift,
    // alpha bounded by C)
    float64_t diag[3] = {0, 0, 0};
    float64_t upper_bound[3] = {Cn, 0, Cp};

    // The bias is re-estimated from scratch on every call to train
    bias = 0;

    PGmax_new = -CMath::INFTY;
    PGmin_new = CMath::INFTY;

    // Accumulated objective value and number of support vectors
    float64_t v = 0;
    int32_t nSV = 0;

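    /* Start the background parser of the streaming features and consume the
     * stream one example at a time; each example is processed exactly once. */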
    features->start_parser();

    CTime start_time;
    while (features->get_next_example())
    {
        alpha_current = 0;
        if (features->get_label() > 0)
            y_current = +1;
        else
            y_current = -1;

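        /* One-variable dual subproblem for this example: Q_ii = x.x + diag[y+1]
         * and gradient G = y*(w.x + bias) - 1 + alpha*diag[y+1]. With alpha
         * starting at 0 the last term vanishes, but it is kept in the same form
         * as in the batch LIBLINEAR solver. */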
        QD = diag[y_current + 1];
        // Dot product of the example with itself
        QD += features->dot(features);

        // Grow w if the current example has more dimensions than seen so far
        features->expand_if_required(w, w_dim);

        G = features->dense_dot(w, w_dim);
        if (use_bias)
            G += bias;
        G = G * y_current - 1;

        C = upper_bound[y_current + 1];
        G += alpha_current * diag[y_current + 1];

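        /* Project the gradient onto the feasible set [0, C]: if alpha sits at a
         * bound and the gradient points further outside, the update would not
         * move alpha, so the example is skipped (shrinking-style test). */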
        PG = 0;
        if (alpha_current == 0)
        {
            if (G > PGmax_old)
            {
                features->release_example();
                continue;
            }
            else if (G < 0)
                PG = G;
        }
        else if (alpha_current == C)
        {
            if (G < PGmin_old)
            {
                features->release_example();
                continue;
            }
            else if (G > 0)
                PG = G;
        }
        else
            PG = G;

        PGmax_new = CMath::max(PGmax_new, PG);
        PGmin_new = CMath::min(PGmin_new, PG);

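        /* Closed-form coordinate update: alpha <- clip(alpha - G/Q_ii, 0, C),
         * then fold the change into the primal solution via
         * w <- w + (alpha_new - alpha_old) * y * x (and the bias likewise). */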
        if (fabs(PG) > 1.0e-12)
        {
            float64_t alpha_old = alpha_current;
            alpha_current = CMath::min(CMath::max(alpha_current - G/QD, 0.0), C);
            d = (alpha_current - alpha_old) * y_current;

            features->add_to_dense_vec(d, w, w_dim);

            if (use_bias)
                bias += d;
        }

        // Accumulate this example's contribution to the dual objective
        v += alpha_current * (alpha_current * diag[y_current + 1] - 2);
        if (alpha_current > 0)
            nSV++;

        features->release_example();
    }

    features->end_parser();

    float64_t gap = PGmax_new - PGmin_new;

    SG_DONE();
    SG_INFO("Optimization finished.\n");

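    /* Report the dual objective in the same form as batch LIBLINEAR:
     * v/2 = 0.5*||w||^2 (+ 0.5*bias^2) - sum_i alpha_i
     *       + 0.5 * sum_i diag[y_i+1] * alpha_i^2 */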
    for (int32_t i=0; i<w_dim; i++)
        v += w[i]*w[i];
    v += bias*bias;

    SG_INFO("Objective value = %lf\n", v/2);
    SG_INFO("nSV = %d\n", nSV);
    SG_INFO("gap = %g\n", gap);

    return true;
}
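
/* A minimal usage sketch (kept as a comment, not part of this translation
 * unit). The streaming classes, the file name and the buffer size below are
 * assumptions based on Shogun's legacy streaming-features API and may need
 * adjusting for a given installation:
 *
 *   // Stream labelled sparse examples from an ASCII file
 *   CStreamingAsciiFile* file = new CStreamingAsciiFile("train.dat");
 *   CStreamingSparseFeatures<float64_t>* feats =
 *       new CStreamingSparseFeatures<float64_t>(file, true, 1024);
 *
 *   // One online pass with cost C = 1.0
 *   COnlineLibLinear* svm = new COnlineLibLinear(1.0, feats);
 *   svm->train();
 *
 *   SG_UNREF(svm);
 *   SG_UNREF(file);
 */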