if (i <= num_encoding_layers)

"Layer %i (%i neurons) must have the same number of neurons "
"as layer %i (%i neurons)\n", i, get_layer(i)->get_num_neurons(),
m_num_layers-i-1, get_layer(m_num_layers-i-1)->get_num_neurons());

for (int32_t i=1; i<=num_encoding_layers; i++)

SG_INFO("Pre-training Layer %i\n", i);

if (strcmp(ae_encoding_layer->get_name(), "NeuralConvolutionalLayer")==0)

ae_encoding_layer, ae_decoding_layer, m_sigma);

for (int32_t j=0; j<i; j++)

for (int32_t j=0; j<i-1; j++)

ae->train(&ae_input_features);

for (int32_t j=0; j<ae_params.vlen; j++)

if (j < encoding_layer_params.vlen)
    encoding_layer_params[j] = ae_params[j];

decoding_layer_params[j-encoding_layer_params.vlen] = ae_params[j];

if (output_layer != NULL)

for (int32_t i=0; i<len; i++)

void CDeepAutoencoder::init()
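The excerpts above come from the layer-wise pre-training path of CDeepAutoencoder (DeepAutoencoder.cpp): each encoding layer is paired with a mirrored decoding layer, trained as a single CAutoencoder, and the learned parameters are then copied back into the deep network's parameter vector. The snippet below is a minimal usage sketch, not taken from this page: it assumes Shogun's C-prefixed C++ API (CDeepAutoencoder, CNeuralInputLayer, CNeuralLogisticLayer, CNeuralSoftmaxLayer, CDynamicObjectArray, CDenseFeatures), and the layer sizes, dummy data and header paths are illustrative.

// Sketch: greedy layer-wise pre-training followed by fine-tuning and
// conversion to a supervised network. Layer sizes, the 256-dimensional
// dummy data and the 10-class output layer are illustrative assumptions.
#include <shogun/base/init.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/lib/DynamicObjectArray.h>
#include <shogun/neuralnets/DeepAutoencoder.h>
#include <shogun/neuralnets/NeuralInputLayer.h>
#include <shogun/neuralnets/NeuralLogisticLayer.h>
#include <shogun/neuralnets/NeuralSoftmaxLayer.h>

using namespace shogun;

int main()
{
	init_shogun_with_defaults();

	// Dense training data: 256 features x 1000 examples (dummy values).
	SGMatrix<float64_t> X(256, 1000);
	X.zero();
	CDenseFeatures<float64_t>* features = new CDenseFeatures<float64_t>(X);
	SG_REF(features);

	// Symmetric layer stack: layer i must mirror layer m_num_layers-i-1,
	// which is what the size check in the excerpt above enforces.
	CDynamicObjectArray* layers = new CDynamicObjectArray();
	layers->append_element(new CNeuralInputLayer(256));
	layers->append_element(new CNeuralLogisticLayer(128));
	layers->append_element(new CNeuralLogisticLayer(32));
	layers->append_element(new CNeuralLogisticLayer(128));
	layers->append_element(new CNeuralLogisticLayer(256));

	CDeepAutoencoder* ae = new CDeepAutoencoder(layers);
	SG_REF(ae);

	ae->pre_train(features); // one single-layer autoencoder per encoding layer
	ae->train(features);     // fine-tune the whole stack as one autoencoder

	// Reuse the encoding layers for supervised training by appending an
	// output layer; attach labels and train the returned network as usual.
	CNeuralNetwork* net = ae->convert_to_neural_network(new CNeuralSoftmaxLayer(10));
	SG_REF(net);

	SG_UNREF(net);
	SG_UNREF(ae);
	SG_UNREF(features);
	exit_shogun();
	return 0;
}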
void set_gd_learning_rate(float64_t gd_learning_rate)
SGVector< int32_t > m_index_offsets
virtual void set_contraction_coefficient(float64_t coeff)
void set_gd_momentum(float64_t gd_momentum)
virtual int32_t get_num_parameters()
virtual void initialize_neural_network(float64_t sigma=0.01f)
int32_t get_num_parameters()
SGVector< int32_t > pt_max_num_epochs
void set_gd_mini_batch_size(int32_t gd_mini_batch_size)
virtual CSGObject * clone()
SGVector< float64_t > get_parameters()
Represents a single layer neural autoencoder.
virtual CDenseFeatures< float64_t > * transform(CDenseFeatures< float64_t > *data)
SGVector< float64_t > pt_contraction_coefficient
SGVector< float64_t > m_params
A generic multi-layer neural network.
virtual void set_contraction_coefficient(float64_t coeff)
virtual int32_t get_num_neurons()
SGMatrix< float64_t > features_to_matrix(CFeatures *features)
Base class for neural network layers.
virtual void quick_connect()
SGVector< float64_t > pt_gd_error_damping_coeff
void set_max_num_epochs(int32_t max_num_epochs)
virtual float64_t compute_error(SGMatrix< float64_t > inputs, SGMatrix< float64_t > targets)
void set_epsilon(float64_t epsilon)
virtual const char * get_name() const
float64_t m_contraction_coefficient
SGVector< float64_t > pt_epsilon
virtual int32_t get_height()
EAENoiseType
Determines the noise type for denoising autoencoders.
virtual CDenseFeatures< float64_t > * reconstruct(CDenseFeatures< float64_t > *data)
virtual float64_t compute_contraction_term(SGVector< float64_t > parameters)
virtual void pre_train(CFeatures *data)
virtual bool train(CFeatures *data)
virtual void set_batch_size(int32_t batch_size)
ENLAutoencoderPosition autoencoder_position
SGVector< float64_t > pt_l1_coefficient
void set_gd_error_damping_coeff(float64_t gd_error_damping_coeff)
void set_l2_coefficient(float64_t l2_coefficient)
Dynamic array class for CSGObject pointers that creates an array that can be used like a list or an array.
virtual int32_t get_width()
CNeuralLayer * get_layer(int32_t i)
All classes and functions are contained in the shogun namespace.
Main component in convolutional neural networks.
virtual float64_t compute_error(SGMatrix< float64_t > targets)
void set_l1_coefficient(float64_t l1_coefficient)
virtual void set_batch_size(int32_t batch_size)
The class Features is the base class of all feature objects.
virtual CNeuralNetwork * convert_to_neural_network(CNeuralLayer *output_layer=NULL, float64_t sigma=0.01)
virtual SGMatrix< float64_t > forward_propagate(CFeatures *data, int32_t j=-1)
SGVector< int32_t > pt_gd_mini_batch_size
void set_gd_learning_rate_decay(float64_t gd_learning_rate_decay)
SGVector< float64_t > pt_noise_parameter
void set_noise_type(EAENoiseType noise_type)
virtual void set_layers(CDynamicObjectArray *layers)
SGVector< int32_t > pt_noise_type
void set_optimization_method(ENNOptimizationMethod optimization_method)
SGVector< float64_t > pt_l2_coefficient
SGVector< int32_t > pt_optimization_method
SGVector< float64_t > pt_gd_momentum
SGVector< float64_t > pt_gd_learning_rate
void set_const(T const_elem)
bool append_element(CSGObject *e)
float64_t contraction_coefficient
void set_noise_parameter(float64_t noise_parameter)
SGVector< float64_t > pt_gd_learning_rate_decay
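The pt_-prefixed members listed above (pt_gd_learning_rate, pt_max_num_epochs, pt_noise_type, and so on) hold the pre-training hyperparameters as vectors with one entry per encoding-layer autoencoder, so each layer can be pre-trained with its own settings. Below is a brief sketch of filling them uniformly via SGVector::set_const(), assuming the pt_* vectors are publicly accessible as the listing suggests; the AENT_DROPOUT enum value and the numeric settings are illustrative assumptions.

// Continuing from the sketch after the source excerpts above
// (ae and features already constructed).
ae->pt_noise_type.set_const(AENT_DROPOUT);  // denoising pre-training (assumed enum value)
ae->pt_noise_parameter.set_const(0.2);      // fraction of inputs corrupted (assumed meaning)
ae->pt_gd_learning_rate.set_const(0.01);
ae->pt_gd_mini_batch_size.set_const(100);
ae->pt_max_num_epochs.set_const(50);
ae->pt_epsilon.set_const(1e-6);
ae->pre_train(features);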