SHOGUN  4.2.0
Autoencoder.cpp
/*
 * Copyright (c) 2014, Shogun Toolbox Foundation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Written (W) 2014 Khaled Nasr
 */

#include <neuralnets/Autoencoder.h>
#include <neuralnets/NeuralInputLayer.h>
#include <neuralnets/NeuralLinearLayer.h>
#include <neuralnets/NeuralConvolutionalLayer.h>
#include <features/DenseFeatures.h>

using namespace shogun;

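// Default constructor: only registers the model parameters via init().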
CAutoencoder::CAutoencoder() : CNeuralNetwork()
{
    init();
}

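// Builds a fully-connected autoencoder: an input layer with num_inputs
// neurons, the given hidden (encoding) layer, and a decoding layer that
// defaults to a CNeuralLinearLayer of the same size as the input. The two
// non-input layers are tagged with their autoencoder roles and the network
// parameters are initialized with standard deviation sigma.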
CAutoencoder::CAutoencoder(int32_t num_inputs, CNeuralLayer* hidden_layer,
    CNeuralLayer* decoding_layer, float64_t sigma) : CNeuralNetwork()
{
    init();

    if (decoding_layer==NULL)
        decoding_layer = new CNeuralLinearLayer(num_inputs);

    CDynamicObjectArray* layers = new CDynamicObjectArray();
    layers->append_element(new CNeuralInputLayer(num_inputs));
    layers->append_element(hidden_layer);
    layers->append_element(decoding_layer);

    set_layers(layers);

    quick_connect();

    hidden_layer->autoencoder_position = NLAP_ENCODING;
    decoding_layer->autoencoder_position = NLAP_DECODING;

    initialize_neural_network(sigma);
}

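// Builds a convolutional autoencoder over input maps of size
// input_width x input_height with input_num_channels channels, using the
// given convolutional encoding and decoding layers.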
CAutoencoder::CAutoencoder(
    int32_t input_width, int32_t input_height, int32_t input_num_channels,
    CNeuralConvolutionalLayer* hidden_layer,
    CNeuralConvolutionalLayer* decoding_layer,
    float64_t sigma)
    : CNeuralNetwork()
{
    init();

    CDynamicObjectArray* layers = new CDynamicObjectArray();
    layers->append_element(new CNeuralInputLayer(input_width, input_height, input_num_channels));
    layers->append_element(hidden_layer);
    layers->append_element(decoding_layer);

    set_layers(layers);

    quick_connect();

    hidden_layer->autoencoder_position = NLAP_ENCODING;
    decoding_layer->autoencoder_position = NLAP_DECODING;

    initialize_neural_network(sigma);
}

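// Trains the autoencoder on the given features, using the inputs as their
// own reconstruction targets. Depending on m_noise_type, the inputs are
// corrupted during training only (dropout or additive Gaussian noise, i.e.
// a denoising autoencoder); optimization is done with gradient descent or
// L-BFGS according to m_optimization_method.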
bool CAutoencoder::train(CFeatures* data)
{
    REQUIRE(data != NULL, "Invalid (NULL) feature pointer\n");

    SGMatrix<float64_t> inputs = features_to_matrix(data);

    if (m_noise_type==AENT_DROPOUT)
        dropout_input = m_noise_parameter;
    if (m_noise_type==AENT_GAUSSIAN)
    {
        CNeuralInputLayer* input_layer = (CNeuralInputLayer*)get_layer(0);
        input_layer->gaussian_noise = m_noise_parameter;
    }

    for (int32_t i=0; i<m_num_layers-1; i++)
    {
        get_layer(i)->dropout_prop =
            get_layer(i)->is_input() ? dropout_input : dropout_hidden;
    }
    get_layer(m_num_layers-1)->dropout_prop = 0.0;

    m_is_training = true;
    for (int32_t i=0; i<m_num_layers; i++)
        get_layer(i)->is_training = true;

    bool result = false;
    if (m_optimization_method == NNOM_GRADIENT_DESCENT)
        result = train_gradient_descent(inputs, inputs);
    else if (m_optimization_method == NNOM_LBFGS)
        result = train_lbfgs(inputs, inputs);

    for (int32_t i=0; i<m_num_layers; i++)
        get_layer(i)->is_training = false;
    m_is_training = false;

    if (m_noise_type==AENT_GAUSSIAN)
    {
        CNeuralInputLayer* input_layer = (CNeuralInputLayer*)get_layer(0);
        input_layer->gaussian_noise = 0;
    }

    return result;
}

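// Maps the data to its hidden (encoded) representation by forward
// propagating only up to the layer just before the decoding layer.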
CDenseFeatures<float64_t>* CAutoencoder::transform(
    CDenseFeatures<float64_t>* data)
{
    SGMatrix<float64_t> hidden_activation = forward_propagate(data, m_num_layers-2);
    return new CDenseFeatures<float64_t>(hidden_activation);
}

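// Forward propagates the data through the whole network and returns the
// reconstructed inputs.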
CDenseFeatures<float64_t>* CAutoencoder::reconstruct(
    CDenseFeatures<float64_t>* data)
{
    SGMatrix<float64_t> reconstructed = forward_propagate(data);
    return new CDenseFeatures<float64_t>(reconstructed);
}

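// Reconstruction error of the underlying network plus, for contractive
// autoencoders (m_contraction_coefficient != 0), the contraction term of the
// hidden layer evaluated on that layer's slice of the parameter vector.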
float64_t CAutoencoder::compute_error(SGMatrix<float64_t> targets)
{
    float64_t error = CNeuralNetwork::compute_error(targets);

    if (m_contraction_coefficient != 0.0)
        error +=
            get_layer(1)->compute_contraction_term(get_section(m_params,1));

    return error;
}

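// Returns a non-owning view (reference counting disabled) into the portion
// of the parameter vector v that belongs to layer i.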
template <class T>
SGVector<T> CAutoencoder::get_section(SGVector<T> v, int32_t i)
{
    return SGVector<T>(v.vector+m_index_offsets[i],
        get_layer(i)->get_num_parameters(), false);
}

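// Sets noise-free defaults and registers the autoencoder-specific members
// with the parameter framework.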
void CAutoencoder::init()
{
    m_noise_type = AENT_NONE;
    m_noise_parameter = 0.0;
    m_contraction_coefficient = 0.0;

    SG_ADD((machine_int_t*)&m_noise_type, "noise_type",
        "Noise Type", MS_NOT_AVAILABLE);
    SG_ADD(&m_noise_parameter, "noise_parameter",
        "Noise Parameter", MS_NOT_AVAILABLE);
    SG_ADD(&m_contraction_coefficient, "contraction_coefficient",
        "Contraction Coefficient", MS_NOT_AVAILABLE);
}
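
// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the original Autoencoder.cpp: builds
// and trains a small autoencoder on dense features and extracts the learned
// representation. The 256/64 dimensionalities, the example_usage() helper and
// its "features" argument are assumptions made for illustration only.
// ---------------------------------------------------------------------------
// void example_usage(CDenseFeatures<float64_t>* features) // 256-dim inputs
// {
//     // 64-neuron linear encoding layer; passing NULL makes the constructor
//     // create a matching CNeuralLinearLayer(256) as the decoding layer.
//     CNeuralLinearLayer* hidden = new CNeuralLinearLayer(64);
//     CAutoencoder* ae = new CAutoencoder(256, hidden, NULL, 0.01);
//
//     ae->train(features);                                          // inputs double as targets
//
//     CDenseFeatures<float64_t>* codes = ae->transform(features);   // hidden activations
//     CDenseFeatures<float64_t>* recon = ae->reconstruct(features); // reconstructed inputs
//
//     SG_UNREF(codes);
//     SG_UNREF(recon);
//     SG_UNREF(ae);
// }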