SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
LogLoss.cpp
Go to the documentation of this file.
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2011 Shashwat Lal Das
 * Written (W) 2012 Fernando José Iglesias García
 * Copyright (c) 2011 Berlin Institute of Technology and Max-Planck-Society.
 */

#include <shogun/loss/LogLoss.h>

#include <cmath>

using namespace shogun;
15 
17 {
18  return (z >= 0) ? log(1 + exp(-z)) : -z + log(1 + exp(z));
19 }
20 
22 {
23  if (z < 0)
24  return -1 / (exp(z) + 1);
25 
26  float64_t ez = exp(-z);
27  return -ez / (ez + 1);
28 }
29 
31 {
32  float64_t ez = exp(z);
33  return ez / (ez*(ez + 2) + 1);
34 }
35 
37 {
38  float64_t w,x;
39  float64_t d = exp(label * prediction);
40  if(eta_t < 1e-6){
41  /* As with squared loss, for small eta_t we replace the update
42  * with its first order Taylor expansion to avoid numerical problems
43  */
44  return label*eta_t/((1+d)*norm);
45  }
46  x = eta_t + label*prediction + d;
47 
48  /* This piece of code is approximating W(exp(x))-x.
49  * W is the Lambert W function: W(z)*exp(W(z))=z.
50  * The absolute error of this approximation is less than 9e-5.
51  * Faster/better approximations can be substituted here.
52  */
53  float64_t W = x>=1. ? 0.86*x+0.01 : exp(0.8*x-0.65); //initial guess
54  float64_t r = x>=1. ? x-log(W)-W : 0.2*x+0.65-W; //residual
55  float64_t t = 1.+W;
56  float64_t u = 2.*t*(t+2.*r/3.); //magic
57  w = W*(1.+r/t*(u-r)/(u-2.*r))-x; //more magic
58 
59  return -(label*w+prediction)/norm;
60 }
61 
63 {
64  float64_t d = CLossFunction::first_derivative(prediction, label);
65  return d*d;
66 }
67 

SHOGUN Machine Learning Toolbox - Documentation