SHOGUN  3.2.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
Autoencoder.cpp
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014, Shogun Toolbox Foundation
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7 
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its
16  * contributors may be used to endorse or promote products derived from this
17  * software without specific prior written permission.
18 
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Written (W) 2014 Khaled Nasr
32  */
33 
39 #include <features/DenseFeatures.h>
40 
41 using namespace shogun;
42 
/* NOTE(review): the signature line for this constructor (original line 43)
 * was lost in extraction; from context it is presumably the default
 * constructor CAutoencoder::CAutoencoder() : CNeuralNetwork() -- confirm
 * against the original source. The body only performs the shared member
 * initialization via init(). */
44 {
45  init();
46 }
47 
/* Constructs an autoencoder over flat (1D) inputs: an input layer of
 * num_inputs neurons, the supplied hidden (encoding) layer, and the
 * supplied decoding layer. If decoding_layer is NULL, a linear layer with
 * num_inputs neurons is created as the decoder. 'sigma' is forwarded to
 * initialize() -- presumably the standard deviation used for random weight
 * initialization; confirm in CNeuralNetwork. */
48 CAutoencoder::CAutoencoder(int32_t num_inputs, CNeuralLayer* hidden_layer,
49  CNeuralLayer* decoding_layer, float64_t sigma) : CNeuralNetwork()
50 {
51  init();
52 
 /* Default decoder: a linear layer mapping back to the input dimension. */
53  if (decoding_layer==NULL)
54  decoding_layer = new CNeuralLinearLayer(num_inputs);
55 
/* NOTE(review): original line 56 is missing from this extraction --
 * presumably the declaration/allocation of 'layers', the container
 * appended to below. Confirm against the original source. */
57  layers->append_element(new CNeuralInputLayer(num_inputs));
58  layers->append_element(hidden_layer);
59  layers->append_element(decoding_layer);
60 
61  set_layers(layers);
62 
 /* quick_connect(): presumably wires each layer to its predecessor. */
63  quick_connect();
64 
 /* Tag the layers with their autoencoder roles (encoding vs. decoding). */
65  hidden_layer->autoencoder_position = NLAP_ENCODING;
66  decoding_layer->autoencoder_position = NLAP_DECODING;
67 
68  initialize(sigma);
69 }
70 
/* NOTE(review): the first line of this constructor (original line 71,
 * presumably "CAutoencoder::CAutoencoder(") is missing from this
 * extraction. It builds a convolutional autoencoder: an input layer of
 * input_width x input_height x input_num_channels, the supplied
 * convolutional hidden (encoding) layer, and the supplied convolutional
 * decoding layer. 'sigma' is forwarded to initialize() -- presumably the
 * standard deviation for random weight initialization. */
72  int32_t input_width, int32_t input_height, int32_t input_num_channels,
73  CNeuralConvolutionalLayer* hidden_layer,
74  CNeuralConvolutionalLayer* decoding_layer,
75  float64_t sigma)
76  : CNeuralNetwork()
77 {
78  init();
79 
/* NOTE(review): original line 80 is missing from this extraction --
 * presumably the declaration/allocation of 'layers', as in the other
 * constructor. */
81  layers->append_element(new CNeuralInputLayer(input_width, input_height, input_num_channels));
82  layers->append_element(hidden_layer);
83  layers->append_element(decoding_layer);
84 
85  set_layers(layers);
86 
 /* quick_connect(): presumably wires each layer to its predecessor. */
87  quick_connect();
88 
 /* Tag the layers with their autoencoder roles (encoding vs. decoding). */
89  hidden_layer->autoencoder_position = NLAP_ENCODING;
90  decoding_layer->autoencoder_position = NLAP_DECODING;
91 
92  initialize(sigma);
93 }
94 
95 
/* Trains the autoencoder on the given features, using the inputs as their
 * own reconstruction targets (see the train_* calls below).
 * NOTE(review): this extraction is missing several source lines -- the
 * function signature (original line 96) and original lines 100, 102-104,
 * 113, 122, 124 and 131; each gap is marked with a hedged note below.
 * Confirm against the original Autoencoder.cpp before relying on them. */
97 {
98  REQUIRE(data != NULL, "Invalid (NULL) feature pointer\n");
99 
/* NOTE(review): missing line 100 -- presumably converts 'data' into the
 * SGMatrix 'inputs' that the train_* calls below consume. */
101 
/* NOTE(review): missing lines 102-104 -- presumably apply the configured
 * noise settings, including the condition guarding the Gaussian-noise
 * block that follows. */
105  {
106  CNeuralInputLayer* input_layer = (CNeuralInputLayer*)get_layer(0);
107  input_layer->gaussian_noise = noise_parameter;
108  }
109 
 /* Set a dropout probability on every layer except the last (output)
  * layer, which is explicitly given zero dropout below. */
110  for (int32_t i=0; i<m_num_layers-1; i++)
111  {
112  get_layer(i)->dropout_prop =
/* NOTE(review): missing line 113 -- the right-hand side of the
 * dropout_prop assignment above. */
114  }
115  get_layer(m_num_layers-1)->dropout_prop = 0.0;
116 
 /* Put the network and every layer into training mode. */
117  m_is_training = true;
118  for (int32_t i=0; i<m_num_layers; i++)
119  get_layer(i)->is_training = true;
120 
121  bool result = false;
/* NOTE(review): missing lines 122 and 124 -- presumably the if/else
 * branch selecting between gradient-descent and L-BFGS optimization.
 * Both calls pass 'inputs' twice: the reconstruction targets are the
 * inputs themselves. */
123  result = train_gradient_descent(inputs, inputs);
125  result = train_lbfgs(inputs, inputs);
126 
 /* Leave training mode on the network and all layers. */
127  for (int32_t i=0; i<m_num_layers; i++)
128  get_layer(i)->is_training = false;
129  m_is_training = false;
130 
/* NOTE(review): missing line 131 -- presumably the condition (Gaussian
 * noise enabled) guarding the reset of the input layer's noise below. */
132  {
133  CNeuralInputLayer* input_layer = (CNeuralInputLayer*)get_layer(0);
134  input_layer->gaussian_noise = 0;
135  }
136 
137  return result;
138 }
139 
/* NOTE(review): the signature (original lines 140-141) is missing from
 * this extraction. Per the body, it forward-propagates 'data' up to layer
 * m_num_layers-2 (the hidden/encoding layer) and returns the hidden
 * activations wrapped in a newly allocated CDenseFeatures object -- the
 * caller presumably takes ownership of the returned pointer. */
142 {
143  SGMatrix<float64_t> hidden_activation = forward_propagate(data, m_num_layers-2);
144  return new CDenseFeatures<float64_t>(hidden_activation);
145 }
146 
/* NOTE(review): the signature (original lines 147-148) is missing from
 * this extraction. Per the body, it forward-propagates 'data' through the
 * whole network and returns the reconstructed output wrapped in a newly
 * allocated CDenseFeatures object -- the caller presumably takes
 * ownership of the returned pointer. */
149 {
150  SGMatrix<float64_t> reconstructed = forward_propagate(data);
151  return new CDenseFeatures<float64_t>(reconstructed);
152 }
153 
/* NOTE(review): the signature (original line 154) is missing from this
 * extraction; per the body it takes 'targets'. Computes the base network
 * error via CNeuralNetwork::compute_error and, when a nonzero contraction
 * coefficient is set, adds the hidden layer's (layer 1's) contraction
 * term evaluated on that layer's slice of the flat parameter vector --
 * i.e. contractive-autoencoder regularization. */
155 {
156  float64_t error = CNeuralNetwork::compute_error(targets);
157 
158  if (m_contraction_coefficient != 0.0)
159  error +=
160  get_layer(1)->compute_contraction_term(get_section(m_params,1));
161 
162  return error;
163 }
164 
165 template <class T>
166 SGVector<T> CAutoencoder::get_section(SGVector<T> v, int32_t i)
167 {
168  return SGVector<T>(v.vector+m_index_offsets[i],
169  get_layer(i)->get_num_parameters(), false);
170 }
171 
/* One-time member initialization shared by all constructors: sets default
 * noise settings and registers the autoencoder's parameters via SG_ADD so
 * they participate in serialization (model selection is disabled for all
 * of them via MS_NOT_AVAILABLE).
 * NOTE(review): original lines 174 and 176 are missing from this
 * extraction -- presumably the default assignments for 'noise_type' and
 * 'm_contraction_coefficient'. Confirm against the original source. */
172 void CAutoencoder::init()
173 {
175  noise_parameter = 0.0;
177 
178  SG_ADD((machine_int_t*)&noise_type, "noise_type",
179  "Noise Type", MS_NOT_AVAILABLE);
180  SG_ADD(&noise_parameter, "noise_parameter",
181  "Noise Parameter", MS_NOT_AVAILABLE);
182  SG_ADD(&m_contraction_coefficient, "contraction_coefficient",
183  "Contraction Coefficient", MS_NOT_AVAILABLE);
184 }

SHOGUN Machine Learning Toolbox - Documentation