Go to the documentation of this file.00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013 #include <shogun/classifier/svm/OnlineLibLinear.h>
00014 #include <shogun/lib/Time.h>
00015
00016 using namespace shogun;
00017
00018 COnlineLibLinear::COnlineLibLinear()
00019 : COnlineLinearMachine()
00020 {
00021 init();
00022 }
00023
00024 COnlineLibLinear::COnlineLibLinear(float64_t C)
00025 {
00026 init();
00027 C1=C;
00028 C2=C;
00029 use_bias=true;
00030 }
00031
00032 COnlineLibLinear::COnlineLibLinear(
00033 float64_t C, CStreamingDotFeatures* traindat)
00034 {
00035 init();
00036 C1=C;
00037 C2=C;
00038 use_bias=true;
00039
00040 set_features(traindat);
00041 }
00042
00043
/** Shared constructor helper.
 *
 * Sets the default hyper-parameters (unit cost for both classes, no
 * bias) and registers them with the parameter framework so they are
 * picked up by serialization / model selection.
 */
void COnlineLibLinear::init()
{
	// Default cost of 1 for the positive (C1) and negative (C2) class.
	C1=1;
	C2=1;
	// Bias is off by default; the cost-taking constructors enable it.
	use_bias=false;

	m_parameters->add(&C1, "C1", "C Cost constant 1.");
	m_parameters->add(&C2, "C2", "C Cost constant 2.");
	m_parameters->add(&use_bias, "use_bias", "Indicates if bias is used.");
}
00054
/** Destructor. Nothing to release: this class owns no resources beyond
 *  what the base class manages. */
COnlineLibLinear::~COnlineLibLinear()
{
}
00058
/** Train the linear machine with a single pass of LibLinear-style dual
 * coordinate descent over a stream of examples.
 *
 * Each example drawn from the streaming features contributes one
 * coordinate-descent step on its dual variable (alpha), and the primal
 * weight vector w (and optionally the bias) is updated incrementally.
 * The zero `diag` entries and `upper_bound = {Cn, 0, Cp}` correspond to
 * the L1-loss (hinge) SVM dual formulation of LibLinear.
 *
 * @param data optional training features; must be streaming dot
 *             features (FP_STREAMING_DOT). If NULL, the features set
 *             previously via set_features() are used.
 * @return true on completion (errors are raised via SG_ERROR)
 */
bool COnlineLibLinear::train(CFeatures* data)
{
	if (data)
	{
		// Replace the feature source, but only if it supports the
		// streaming dot-product interface this solver relies on.
		if (!data->has_property(FP_STREAMING_DOT))
			SG_ERROR("Specified features are not of type CStreamingDotFeatures\n");
		set_features((CStreamingDotFeatures*) data);
	}

	float64_t C, d, G;
	float64_t QD;

	// Label (+1/-1) and dual variable of the current example.
	int32_t y_current;
	float64_t alpha_current;

	// Per-class cost constants.
	float64_t Cp=C1;
	float64_t Cn=C2;

	// Projected gradient and its running extrema, used for the
	// shrinking-style skip tests and the final duality-gap report.
	float64_t PG;
	float64_t PGmax_old = CMath::INFTY;
	float64_t PGmin_old = -CMath::INFTY;
	float64_t PGmax_new = -CMath::INFTY;
	float64_t PGmin_new = CMath::INFTY;

	// diag/upper_bound indexed by (y + 1), i.e. slot 0 for y=-1 and
	// slot 2 for y=+1. All-zero diag selects the L1-loss formulation.
	float64_t diag[3] = {0, 0, 0};
	float64_t upper_bound[3] = {Cn, 0, Cp};

	// Training starts from scratch: reset the bias.
	bias = 0;

	PGmax_new = -CMath::INFTY;
	PGmin_new = CMath::INFTY;

	// Accumulates the dual objective contribution of the alphas.
	float64_t v = 0;
	// Number of examples that ended up with a non-zero alpha.
	int32_t nSV = 0;

	// Begin pulling examples from the stream.
	features->start_parser();

	CTime start_time;
	while (features->get_next_example())
	{
		// NOTE(review): alpha is reset to 0 for every example — this is
		// the single-pass online variant, so each example's dual
		// variable is optimized exactly once from zero.
		alpha_current = 0;
		if (features->get_label() > 0)
			y_current = +1;
		else
			y_current = -1;

		// QD = diag + x.x, the second-derivative term of the
		// one-variable subproblem.
		QD = diag[y_current + 1];
		QD += features->dot(features);

		// Grow w if this example has more dimensions than seen so far.
		features->expand_if_required(w, w_dim);

		// Gradient of the dual objective w.r.t. this alpha:
		// G = y * (w.x + b) - 1 (+ diag*alpha, zero for L1-loss).
		G = features->dense_dot(w, w_dim);
		if (use_bias)
			G += bias;
		G = G*y_current - 1;

		C = upper_bound[y_current + 1];
		// NOTE(review): alpha_current is always 0 here, so this adds
		// nothing; presumably retained from the batch LibLinear solver.
		G += alpha_current*diag[y_current + 1];

		// Projected gradient: clip G at the active box constraints.
		PG = 0;
		if (alpha_current == 0)
		{
			if (G > PGmax_old)
			{
				// Constraint already optimal for this example; skip it.
				features->release_example();
				continue;
			}
			else if (G < 0)
				PG = G;
		}
		// NOTE(review): with alpha_current fixed at 0 above, the two
		// branches below are unreachable in this online setting.
		else if (alpha_current == C)
		{
			if (G < PGmin_old)
			{
				features->release_example();
				continue;
			}
			else if (G > 0)
				PG = G;
		}
		else
			PG = G;

		PGmax_new = CMath::max(PGmax_new, PG);
		PGmin_new = CMath::min(PGmin_new, PG);

		// Take a Newton step on this coordinate, clipped to [0, C],
		// and fold the change back into w (and the bias).
		if (fabs(PG) > 1.0e-12)
		{
			float64_t alpha_old = alpha_current;
			alpha_current = CMath::min(CMath::max(alpha_current - G/QD, 0.0), C);
			d = (alpha_current - alpha_old) * y_current;

			features->add_to_dense_vec(d, w, w_dim);

			// Bias behaves like an implicit all-ones feature.
			if (use_bias)
				bias += d;
		}

		// Dual objective contribution: alpha*(alpha*diag - 2).
		v += alpha_current*(alpha_current*diag[y_current + 1] - 2);
		if (alpha_current > 0)
			nSV++;

		features->release_example();
	}

	features->end_parser();

	// Spread between the largest and smallest projected gradients seen;
	// reported as a rough convergence indicator.
	float64_t gap = PGmax_new - PGmin_new;

	SG_DONE();
	SG_INFO("Optimization finished.\n");

	// Complete the objective with the ||w||^2 (and bias^2) term.
	for (int32_t i=0; i<w_dim; i++)
		v += w[i]*w[i];
	v += bias*bias;

	SG_INFO("Objective value = %lf\n", v/2);
	SG_INFO("nSV = %d\n", nSV);
	SG_INFO("gap = %g\n", gap);

	return true;
}