SHOGUN  4.2.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
NeuralLogisticLayer.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014, Shogun Toolbox Foundation
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7 
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its
16  * contributors may be used to endorse or promote products derived from this
17  * software without specific prior written permission.
18 
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Written (W) 2014 Khaled Nasr
32  */
33 
#include <shogun/neuralnets/NeuralLogisticLayer.h>
#include <shogun/mathematics/Math.h>
#include <shogun/lib/SGVector.h>
37 
38 using namespace shogun;
39 
41 {
42 }
43 
45 CNeuralLinearLayer(num_neurons)
46 {
47 }
48 
50  CDynamicObjectArray* layers)
51 {
52  CNeuralLinearLayer::compute_activations(parameters, layers);
53 
54  // apply logistic activation function
55  int32_t length = m_num_neurons*m_batch_size;
56  for (int32_t i=0; i<length; i++)
57  m_activations[i] = 1.0/(1.0+CMath::exp(-1.0*m_activations[i]));
58 }
59 
61  SGVector< float64_t > parameters)
62 {
64 
66  m_num_neurons, num_inputs, false);
67 
68  float64_t contraction_term = 0;
69  for (int32_t i=0; i<m_num_neurons; i++)
70  {
71  float64_t sum_j = 0;
72  for (int32_t j=0; j<num_inputs; j++)
73  sum_j += W(i,j)*W(i,j);
74 
75  for (int32_t k=0; k<m_batch_size; k++)
76  {
77  float64_t h_ = m_activations(i,k)*(1-m_activations(i,k));
78  contraction_term += h_*h_*sum_j;
79  }
80  }
81 
82  return (contraction_coefficient/m_batch_size) * contraction_term;
83 }
84 
86  SGVector< float64_t > parameters, SGVector< float64_t > gradients)
87 {
89 
91  m_num_neurons, num_inputs, false);
93  m_num_neurons, num_inputs, false);
94 
95  for (int32_t k = 0; k<m_batch_size; k++)
96  {
97  for (int32_t i=0; i<m_num_neurons; i++)
98  {
99  for (int32_t j=0; j<num_inputs; j++)
100  {
101  float64_t h = m_activations(i,k);
102  float64_t w = W(i,j);
103  float64_t h_ = w*h*(1-h);
104 
105  float64_t g = 2*w*(h-1)*h*(h*(2*w*h_-1)-w*h_+h*h);
106 
107  WG(i,j) += (contraction_coefficient/m_batch_size)*g;
108  }
109  }
110  }
111 }
112 
113 
115 {
117 
118  // multiply by the derivative of the logistic function
119  int32_t length = m_num_neurons*m_batch_size;
120  for (int32_t i=0; i<length; i++)
121  m_local_gradients[i] *= m_activations[i] * (1.0-m_activations[i]);
122 }
virtual float64_t compute_contraction_term(SGVector< float64_t > parameters)
SGVector< int32_t > m_input_sizes
Definition: NeuralLayer.h:368
virtual void compute_contraction_term_gradients(SGVector< float64_t > parameters, SGVector< float64_t > gradients)
SGMatrix< float64_t > m_activations
Definition: NeuralLayer.h:376
virtual void compute_activations(SGVector< float64_t > parameters, CDynamicObjectArray *layers)
SGMatrix< float64_t > m_local_gradients
Definition: NeuralLayer.h:387
virtual void compute_local_gradients(SGMatrix< float64_t > targets)
index_t vlen
Definition: SGVector.h:494
double float64_t
Definition: common.h:50
static T sum(T *vec, int32_t len)
Return sum(vec)
Definition: SGVector.h:354
Dynamic array class for CSGObject pointers that creates an array that can be used like a list or an a...
Neural layer with linear neurons, with an identity activation function. can be used as a hidden layer...
virtual void compute_activations(SGVector< float64_t > parameters, CDynamicObjectArray *layers)
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
static float64_t exp(float64_t x)
Definition: Math.h:621
virtual void compute_local_gradients(SGMatrix< float64_t > targets)
float64_t contraction_coefficient
Definition: NeuralLayer.h:338

SHOGUN Machine Learning Toolbox - Documentation