SHOGUN
4.2.0
Represents a single-layer neural autoencoder.
An autoencoder is a neural network with three layers: an input layer, a hidden (encoding) layer, and a decoding layer. The network is trained to reconstruct its own inputs, which forces the hidden layer to learn useful representations of those inputs.
This class supports training normal autoencoders and denoising autoencoders [Vincent, 2008]. To train a denoising autoencoder, set noise_type and noise_parameter to specify the type and strength of the noise.
NOTE: LBFGS does not work properly with denoising autoencoders due to their stochastic nature. Use gradient descent instead.
Contractive autoencoders [Rifai, 2011] are also supported. To use them, call set_contraction_coefficient(). Denoising can also be used with contractive autoencoders through noise_type and noise_parameter.
Convolutional autoencoders [J Masci, 2011] are also supported. Simply build the autoencoder using CNeuralConvolutionalLayer objects.
NOTE: Contractive convolutional autoencoders are not supported.
Definition at line 86 of file Autoencoder.h.
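As a quick orientation, the following sketch builds, trains, and queries a small autoencoder end to end. The hidden layer type (CNeuralLogisticLayer), the synthetic 10-dimensional data, and the exact header paths are illustrative assumptions rather than requirements of this class.

    #include <cmath>
    #include <shogun/base/init.h>
    #include <shogun/features/DenseFeatures.h>
    #include <shogun/lib/SGMatrix.h>
    #include <shogun/neuralnets/Autoencoder.h>
    #include <shogun/neuralnets/NeuralLogisticLayer.h>

    using namespace shogun;

    int main()
    {
        init_shogun_with_defaults();

        // Illustrative data: 10-dimensional inputs, 100 examples
        SGMatrix<float64_t> X(10, 100);
        for (int32_t i = 0; i < X.num_rows * X.num_cols; i++)
            X.matrix[i] = std::sin(0.1 * i);
        CDenseFeatures<float64_t>* features = new CDenseFeatures<float64_t>(X);
        SG_REF(features);

        // 10 inputs -> 5 hidden neurons; the decoding layer defaults to a CNeuralLinearLayer
        CAutoencoder* ae = new CAutoencoder(10, new CNeuralLogisticLayer(5));
        SG_REF(ae);
        ae->train(features);

        // Learned hidden representation and reconstructions of the inputs
        CDenseFeatures<float64_t>* hidden = ae->transform(features);
        CDenseFeatures<float64_t>* reconstructed = ae->reconstruct(features);

        SG_UNREF(hidden);
        SG_UNREF(reconstructed);
        SG_UNREF(ae);
        SG_UNREF(features);
        exit_shogun();
        return 0;
    }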
Public Member Functions | |
CAutoencoder () | |
CAutoencoder (int32_t num_inputs, CNeuralLayer *hidden_layer, CNeuralLayer *decoding_layer=NULL, float64_t sigma=0.01) | |
CAutoencoder (int32_t input_width, int32_t input_height, int32_t input_num_channels, CNeuralConvolutionalLayer *hidden_layer, CNeuralConvolutionalLayer *decoding_layer, float64_t sigma=0.01) | |
virtual bool | train (CFeatures *data) |
virtual CDenseFeatures < float64_t > * | transform (CDenseFeatures< float64_t > *data) |
virtual CDenseFeatures < float64_t > * | reconstruct (CDenseFeatures< float64_t > *data) |
virtual void | set_contraction_coefficient (float64_t coeff) |
virtual | ~CAutoencoder () |
virtual const char * | get_name () const |
void | set_noise_type (EAENoiseType noise_type) |
EAENoiseType | get_noise_type () |
void | set_noise_parameter (float64_t noise_parameter) |
float64_t | get_noise_parameter () |
virtual void | set_layers (CDynamicObjectArray *layers) |
virtual void | connect (int32_t i, int32_t j) |
virtual void | quick_connect () |
virtual void | disconnect (int32_t i, int32_t j) |
virtual void | disconnect_all () |
virtual void | initialize_neural_network (float64_t sigma=0.01f) |
virtual CBinaryLabels * | apply_binary (CFeatures *data) |
virtual CRegressionLabels * | apply_regression (CFeatures *data) |
virtual CMulticlassLabels * | apply_multiclass (CFeatures *data) |
virtual void | set_labels (CLabels *lab) |
virtual EMachineType | get_classifier_type () |
virtual EProblemType | get_machine_problem_type () const |
virtual float64_t | check_gradients (float64_t approx_epsilon=1.0e-3, float64_t s=1.0e-9) |
SGVector< float64_t > * | get_layer_parameters (int32_t i) |
int32_t | get_num_parameters () |
SGVector< float64_t > | get_parameters () |
int32_t | get_num_inputs () |
int32_t | get_num_outputs () |
CDynamicObjectArray * | get_layers () |
void | set_optimization_method (ENNOptimizationMethod optimization_method) |
ENNOptimizationMethod | get_optimization_method () const |
void | set_l2_coefficient (float64_t l2_coefficient) |
float64_t | get_l2_coefficient () const |
void | set_l1_coefficient (float64_t l1_coefficient) |
float64_t | get_l1_coefficient () const |
void | set_dropout_hidden (float64_t dropout_hidden) |
float64_t | get_dropout_hidden () const |
void | set_dropout_input (float64_t dropout_input) |
float64_t | get_dropout_input () const |
void | set_max_norm (float64_t max_norm) |
float64_t | get_max_norm () const |
void | set_epsilon (float64_t epsilon) |
float64_t | get_epsilon () const |
void | set_max_num_epochs (int32_t max_num_epochs) |
int32_t | get_max_num_epochs () const |
void | set_gd_mini_batch_size (int32_t gd_mini_batch_size) |
int32_t | get_gd_mini_batch_size () const |
void | set_gd_learning_rate (float64_t gd_learning_rate) |
float64_t | get_gd_learning_rate () const |
void | set_gd_learning_rate_decay (float64_t gd_learning_rate_decay) |
float64_t | get_gd_learning_rate_decay () const |
void | set_gd_momentum (float64_t gd_momentum) |
float64_t | get_gd_momentum () const |
void | set_gd_error_damping_coeff (float64_t gd_error_damping_coeff) |
float64_t | get_gd_error_damping_coeff () const |
virtual CLabels * | apply (CFeatures *data=NULL) |
virtual CStructuredLabels * | apply_structured (CFeatures *data=NULL) |
virtual CLatentLabels * | apply_latent (CFeatures *data=NULL) |
virtual CLabels * | get_labels () |
void | set_max_train_time (float64_t t) |
float64_t | get_max_train_time () |
void | set_solver_type (ESolverType st) |
ESolverType | get_solver_type () |
virtual void | set_store_model_features (bool store_model) |
virtual bool | train_locked (SGVector< index_t > indices) |
virtual float64_t | apply_one (int32_t i) |
virtual CLabels * | apply_locked (SGVector< index_t > indices) |
virtual CBinaryLabels * | apply_locked_binary (SGVector< index_t > indices) |
virtual CRegressionLabels * | apply_locked_regression (SGVector< index_t > indices) |
virtual CMulticlassLabels * | apply_locked_multiclass (SGVector< index_t > indices) |
virtual CStructuredLabels * | apply_locked_structured (SGVector< index_t > indices) |
virtual CLatentLabels * | apply_locked_latent (SGVector< index_t > indices) |
virtual void | data_lock (CLabels *labs, CFeatures *features) |
virtual void | post_lock (CLabels *labs, CFeatures *features) |
virtual void | data_unlock () |
virtual bool | supports_locking () const |
bool | is_data_locked () const |
virtual CSGObject * | shallow_copy () const |
virtual CSGObject * | deep_copy () const |
virtual bool | is_generic (EPrimitiveType *generic) const |
template<class T > | |
void | set_generic () |
template<> specializations of set_generic () for each supported primitive type | |
void | unset_generic () |
virtual void | print_serializable (const char *prefix="") |
virtual bool | save_serializable (CSerializableFile *file, const char *prefix="") |
virtual bool | load_serializable (CSerializableFile *file, const char *prefix="") |
void | set_global_io (SGIO *io) |
SGIO * | get_global_io () |
void | set_global_parallel (Parallel *parallel) |
Parallel * | get_global_parallel () |
void | set_global_version (Version *version) |
Version * | get_global_version () |
SGStringList< char > | get_modelsel_names () |
void | print_modsel_params () |
char * | get_modsel_param_descr (const char *param_name) |
index_t | get_modsel_param_index (const char *param_name) |
void | build_gradient_parameter_dictionary (CMap< TParameter *, CSGObject * > *dict) |
bool | has (const std::string &name) const |
template<typename T > | |
bool | has (const Tag< T > &tag) const |
template<typename T , typename U = void> | |
bool | has (const std::string &name) const |
template<typename T > | |
void | set (const Tag< T > &_tag, const T &value) |
template<typename T , typename U = void> | |
void | set (const std::string &name, const T &value) |
template<typename T > | |
T | get (const Tag< T > &_tag) const |
template<typename T , typename U = void> | |
T | get (const std::string &name) const |
virtual void | update_parameter_hash () |
virtual bool | parameter_hash_changed () |
virtual bool | equals (CSGObject *other, float64_t accuracy=0.0, bool tolerant=false) |
virtual CSGObject * | clone () |
Public Attributes | |
SGIO * | io |
Parallel * | parallel |
Version * | version |
Parameter * | m_parameters |
Parameter * | m_model_selection_parameters |
Parameter * | m_gradient_parameters |
uint32_t | m_hash |
Protected Member Functions | |
virtual float64_t | compute_error (SGMatrix< float64_t > targets) |
virtual bool | train_machine (CFeatures *data=NULL) |
virtual bool | train_gradient_descent (SGMatrix< float64_t > inputs, SGMatrix< float64_t > targets) |
virtual bool | train_lbfgs (SGMatrix< float64_t > inputs, SGMatrix< float64_t > targets) |
virtual SGMatrix< float64_t > | forward_propagate (CFeatures *data, int32_t j=-1) |
virtual SGMatrix< float64_t > | forward_propagate (SGMatrix< float64_t > inputs, int32_t j=-1) |
virtual void | set_batch_size (int32_t batch_size) |
virtual float64_t | compute_gradients (SGMatrix< float64_t > inputs, SGMatrix< float64_t > targets, SGVector< float64_t > gradients) |
virtual float64_t | compute_error (SGMatrix< float64_t > inputs, SGMatrix< float64_t > targets) |
virtual bool | is_label_valid (CLabels *lab) const |
CNeuralLayer * | get_layer (int32_t i) |
SGMatrix< float64_t > | features_to_matrix (CFeatures *features) |
SGMatrix< float64_t > | labels_to_matrix (CLabels *labs) |
virtual void | store_model_features () |
virtual bool | train_require_labels () const |
virtual void | load_serializable_pre () throw (ShogunException) |
virtual void | load_serializable_post () throw (ShogunException) |
virtual void | save_serializable_pre () throw (ShogunException) |
virtual void | save_serializable_post () throw (ShogunException) |
template<typename T > | |
void | register_param (Tag< T > &_tag, const T &value) |
template<typename T > | |
void | register_param (const std::string &name, const T &value) |
CAutoencoder ()
default constructor
Definition at line 43 of file Autoencoder.cpp.
CAutoencoder (int32_t num_inputs, CNeuralLayer * hidden_layer, CNeuralLayer * decoding_layer = NULL, float64_t sigma = 0.01)
Constructor
num_inputs | Number of inputs |
hidden_layer | Hidden layer. Can be any CNeuralLayer based object that supports being used as a hidden layer |
decoding_layer | Decoding layer. Must have the same number of neurons as num_inputs. Can be any CNeuralLayer based object that supports being used as an output layer. If NULL, a CNeuralLinearLayer is used. |
sigma | Standard deviation of the gaussian used to initialize the parameters |
Definition at line 48 of file Autoencoder.cpp.
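For illustration, the two typical ways of calling this constructor are sketched below; the concrete layer types (CNeuralRectifiedLinearLayer, CNeuralLinearLayer) and sizes are assumptions, not requirements.

    // Decoding layer left NULL: a CNeuralLinearLayer with num_inputs neurons is created internally
    CAutoencoder* ae1 = new CAutoencoder(64, new CNeuralRectifiedLinearLayer(16));

    // Explicit decoding layer: must have the same number of neurons as num_inputs (64 here);
    // sigma controls the Gaussian used to initialize the parameters
    CAutoencoder* ae2 = new CAutoencoder(64,
        new CNeuralRectifiedLinearLayer(16),
        new CNeuralLinearLayer(64),
        0.1);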
CAutoencoder (int32_t input_width, int32_t input_height, int32_t input_num_channels, CNeuralConvolutionalLayer * hidden_layer, CNeuralConvolutionalLayer * decoding_layer, float64_t sigma = 0.01)
Constructor for convolutional autoencoders
input_width | Width of the input images |
input_height | Height of the input images |
input_num_channels | Number of channels in the input images |
hidden_layer | Hidden layer |
decoding_layer | Decoding layer. Should have the same dimensions as the inputs. |
sigma | Standard deviation of the gaussian used to initialize the parameters |
Definition at line 71 of file Autoencoder.cpp.
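A rough sketch of building a convolutional autoencoder for 16x16 single-channel images follows. The CNeuralConvolutionalLayer constructor arguments shown (activation function, number of maps, kernel radii) are assumptions about that class's interface; consult NeuralConvolutionalLayer.h for the exact signature.

    // Hidden layer: 5 feature maps with a 2x2 kernel radius (assumed argument order)
    CNeuralConvolutionalLayer* hidden =
        new CNeuralConvolutionalLayer(CMAF_RECTIFIED_LINEAR, 5, 2, 2);

    // Decoding layer: a single map so that the output matches the 16x16x1 input
    CNeuralConvolutionalLayer* decoding =
        new CNeuralConvolutionalLayer(CMAF_IDENTITY, 1, 2, 2);

    // 16x16 images, 1 channel
    CAutoencoder* conv_ae = new CAutoencoder(16, 16, 1, hidden, decoding);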
|
virtual |
Definition at line 164 of file Autoencoder.h.
apply machine to data if data is not specified apply to the current features
data | (test)data to be classified |
Definition at line 152 of file Machine.cpp.
|
virtualinherited |
apply machine to data in means of binary classification problem
Reimplemented from CMachine.
Definition at line 158 of file NeuralNetwork.cpp.
|
virtualinherited |
apply machine to data in means of latent problem
Reimplemented in CLinearLatentMachine.
Definition at line 232 of file Machine.cpp.
Applies a locked machine on a set of indices. Error if machine is not locked
indices | index vector (of locked features) that is predicted |
Definition at line 187 of file Machine.cpp.
|
virtualinherited |
applies a locked machine on a set of indices for binary problems
Reimplemented in CKernelMachine.
Definition at line 238 of file Machine.cpp.
|
virtualinherited |
applies a locked machine on a set of indices for latent problems
Definition at line 266 of file Machine.cpp.
|
virtualinherited |
applies a locked machine on a set of indices for multiclass problems
Definition at line 252 of file Machine.cpp.
|
virtualinherited |
applies a locked machine on a set of indices for regression problems
Reimplemented in CKernelMachine.
Definition at line 245 of file Machine.cpp.
|
virtualinherited |
applies a locked machine on a set of indices for structured problems
Definition at line 259 of file Machine.cpp.
|
virtualinherited |
apply machine to data in means of multiclass classification problem
Reimplemented from CMachine.
Definition at line 199 of file NeuralNetwork.cpp.
|
virtualinherited |
applies to one vector
Reimplemented in CKernelMachine, CRelaxedTree, COnlineLinearMachine, CLinearMachine, CKNN, CMulticlassMachine, CDistanceMachine, CScatterSVM, CGaussianNaiveBayes, and CPluginEstimate.
|
virtualinherited |
apply machine to data in means of regression problem
Reimplemented from CMachine.
Definition at line 187 of file NeuralNetwork.cpp.
|
virtualinherited |
apply machine to data in means of SO classification problem
Reimplemented in CLinearStructuredOutputMachine.
Definition at line 226 of file Machine.cpp.
|
inherited |
Builds a dictionary of all parameters in SGObject as well as those of SGObjects that are parameters of this object. The dictionary maps parameters to the objects that own them.
dict | dictionary of parameters to be built. |
Definition at line 630 of file SGObject.cpp.
|
virtualinherited |
Checks if the gradients computed using backpropagation are correct by comparing them with gradients computed using numerical approximation. Used for testing purposes only.
Gradients are numerically approximated according to:
\[ c = max(\epsilon x, s) \]
\[ f'(x) = \frac{f(x + c)-f(x - c)}{2c} \]
approx_epsilon | Constant used during gradient approximation |
s | Some small value, used to prevent division by zero |
Definition at line 554 of file NeuralNetwork.cpp.
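Used for testing only, a call might look like the sketch below; ae is any freshly constructed autoencoder, and the printed value is expected to be close to zero when backpropagation is implemented correctly.

    CAutoencoder* ae = new CAutoencoder(10, new CNeuralLogisticLayer(5));
    float64_t diff = ae->check_gradients();
    SG_SPRINT("gradient check difference: %g\n", diff);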
|
virtualinherited |
Creates a clone of the current object. This is done via recursively traversing all parameters, which corresponds to a deep copy. Calling equals on the cloned object always returns true although none of the memory of both objects overlaps.
Definition at line 747 of file SGObject.cpp.
Computes the error between the output layer's activations and the given target activations.
targets | desired values for the network's output, matrix of size num_neurons_output_layer*batch_size |
Reimplemented from CNeuralNetwork.
Reimplemented in CDeepAutoencoder.
Definition at line 154 of file Autoencoder.cpp.
|
protectedvirtualinherited |
Forward propagates the inputs and computes the error between the output layer's activations and the given target activations.
inputs | inputs to the network, a matrix of size m_num_inputs*m_batch_size |
targets | desired values for the network's output, matrix of size num_neurons_output_layer*batch_size |
Definition at line 546 of file NeuralNetwork.cpp.
|
protectedvirtualinherited |
Applies backpropagation to compute the gradients of the error with respect to every parameter in the network.
inputs | inputs to the network, a matrix of size m_num_inputs*m_batch_size |
targets | desired values for the output layer's activations. matrix of size m_layers[m_num_layers-1].get_num_neurons()*m_batch_size |
gradients | array to be filled with gradient values. |
Definition at line 467 of file NeuralNetwork.cpp.
|
virtualinherited |
Connects layer i as input to layer j. In order for forward and backpropagation to work correctly, i must be less than j.
Definition at line 75 of file NeuralNetwork.cpp.
Locks the machine on given labels and data. After this call, only train_locked and apply_locked may be called
Only possible if supports_locking() returns true
labs | labels used for locking |
features | features used for locking |
Reimplemented in CKernelMachine.
Definition at line 112 of file Machine.cpp.
|
virtualinherited |
Unlocks a locked machine and restores previous state
Reimplemented in CKernelMachine.
Definition at line 143 of file Machine.cpp.
|
virtualinherited |
A deep copy. All the instance variables will also be copied.
Definition at line 231 of file SGObject.cpp.
|
virtualinherited |
Disconnects layer i from layer j
Definition at line 88 of file NeuralNetwork.cpp.
|
virtualinherited |
Removes all connections in the network
Definition at line 93 of file NeuralNetwork.cpp.
Recursively compares the current SGObject to another one. Compares all registered numerical parameters, recursion upon complex (SGObject) parameters. Does not compare pointers!
May be overwritten but please do with care! Should not be necessary in most cases.
other | object to compare with |
accuracy | accuracy to use for comparison (optional) |
tolerant | allows lenient check on float equality (within accuracy) |
Definition at line 651 of file SGObject.cpp.
Ensures the given features are suitable for use with the network and returns their feature matrix
Definition at line 614 of file NeuralNetwork.cpp.
|
protectedvirtualinherited |
Applies forward propagation, computes the activations of each layer up to layer j
data | input features |
j | layer index at which the propagation should stop. If -1, the propagation continues up to the last layer |
Definition at line 439 of file NeuralNetwork.cpp.
|
protectedvirtualinherited |
Applies forward propagation, computes the activations of each layer up to layer j
inputs | inputs to the network, a matrix of size m_num_inputs*m_batch_size |
j | layer index at which the propagation should stop. If -1, the propagation continues up to the last layer |
Definition at line 446 of file NeuralNetwork.cpp.
|
inherited |
Getter for a class parameter, identified by a Tag. Throws an exception if the class does not have such a parameter.
_tag | name and type information of parameter |
Definition at line 367 of file SGObject.h.
|
inherited |
Getter for a class parameter, identified by a name. Throws an exception if the class does not have such a parameter.
name | name of the parameter |
Definition at line 388 of file SGObject.h.
|
virtualinherited |
get classifier type
Reimplemented from CMachine.
Definition at line 188 of file NeuralNetwork.h.
|
inherited |
Returns dropout probability for hidden layers
Definition at line 292 of file NeuralNetwork.h.
|
inherited |
Returns dropout probability for input layers
Definition at line 312 of file NeuralNetwork.h.
|
inherited |
Returns epsilon
Definition at line 346 of file NeuralNetwork.h.
|
inherited |
Definition at line 454 of file NeuralNetwork.h.
|
inherited |
Returns gradient descent learning rate
Definition at line 393 of file NeuralNetwork.h.
|
inherited |
Returns gradient descent learning rate decay
Definition at line 410 of file NeuralNetwork.h.
|
inherited |
Returns mini batch size
Definition at line 378 of file NeuralNetwork.h.
|
inherited |
Returns gradient descent momentum multiplier
Definition at line 431 of file NeuralNetwork.h.
|
inherited |
Returns L1 coefficient
Definition at line 272 of file NeuralNetwork.h.
|
inherited |
Returns L2 coefficient
Definition at line 258 of file NeuralNetwork.h.
|
virtualinherited |
|
protectedinherited |
returns a pointer to layer i in the network
Definition at line 723 of file NeuralNetwork.cpp.
returns a copy of a layer's parameters array
i | index of the layer |
Definition at line 712 of file NeuralNetwork.cpp.
|
inherited |
Returns an array holding the network's layers
Definition at line 744 of file NeuralNetwork.cpp.
|
virtualinherited |
returns type of problem machine solves
Reimplemented from CMachine.
Definition at line 675 of file NeuralNetwork.cpp.
|
inherited |
Returns maximum allowable L2 norm
Definition at line 328 of file NeuralNetwork.h.
|
inherited |
Returns maximum number of epochs
Definition at line 362 of file NeuralNetwork.h.
|
inherited |
|
inherited |
Definition at line 531 of file SGObject.cpp.
|
inherited |
Returns description of a given parameter string, if it exists. SG_ERROR otherwise
param_name | name of the parameter |
Definition at line 555 of file SGObject.cpp.
|
inherited |
Returns index of model selection parameter with provided name
param_name | name of model selection parameter |
Definition at line 568 of file SGObject.cpp.
|
virtual |
Returns the name of the SGSerializable instance. It MUST BE the CLASS NAME without the prefixed `C'.
Reimplemented from CNeuralNetwork.
Reimplemented in CDeepAutoencoder.
Definition at line 166 of file Autoencoder.h.
float64_t get_noise_parameter ()
Returns noise parameter
Definition at line 201 of file Autoencoder.h.
EAENoiseType get_noise_type ()
Returns noise type for denoising autoencoders
Definition at line 185 of file Autoencoder.h.
|
inherited |
returns the number of inputs the network takes
Definition at line 224 of file NeuralNetwork.h.
|
inherited |
returns the number of neurons in the output layer
Definition at line 739 of file NeuralNetwork.cpp.
|
inherited |
returns the total number of parameters in the network
Definition at line 218 of file NeuralNetwork.h.
|
inherited |
Returns optimization method
Definition at line 244 of file NeuralNetwork.h.
returns the network's parameter array
Definition at line 221 of file NeuralNetwork.h.
|
inherited |
|
inherited |
Checks if object has a class parameter identified by a name.
name | name of the parameter |
Definition at line 289 of file SGObject.h.
|
inherited |
Checks if object has a class parameter identified by a Tag.
tag | tag of the parameter containing name and type information |
Definition at line 301 of file SGObject.h.
|
inherited |
Checks if a type exists for a class parameter identified by a name.
name | name of the parameter |
Definition at line 312 of file SGObject.h.
|
virtualinherited |
Initializes the network
sigma | standard deviation of the gaussian used to randomly initialize the parameters |
Definition at line 98 of file NeuralNetwork.cpp.
|
inherited |
|
virtualinherited |
If the SGSerializable is a class template then TRUE will be returned and GENERIC is set to the type of the generic.
generic | set to the type of the generic if returning TRUE |
Definition at line 329 of file SGObject.cpp.
|
protectedvirtualinherited |
Checks whether the labels are valid.
Subclasses can override this to implement their check of label types.
lab | the labels being checked, guaranteed to be non-NULL |
Reimplemented from CMachine.
Definition at line 689 of file NeuralNetwork.cpp.
converts the given labels into a matrix suitable for use with the network
Definition at line 630 of file NeuralNetwork.cpp.
|
virtualinherited |
Loads this object from file. If loading fails (returns FALSE), this object will contain inconsistent data and should not be used!
file | where to load from |
prefix | prefix for members |
Definition at line 402 of file SGObject.cpp.
|
protectedvirtualinherited |
Can (optionally) be overridden to post-initialize some member variables which are not PARAMETER::ADD'ed. Make sure that the overridden method BASE_CLASS::LOAD_SERIALIZABLE_POST is called first.
ShogunException | will be thrown if an error occurs. |
Reimplemented in CKernel, CWeightedDegreePositionStringKernel, CList, CAlphabet, CLinearHMM, CGaussianKernel, CInverseMultiQuadricKernel, CCircularKernel, and CExponentialKernel.
Definition at line 459 of file SGObject.cpp.
|
protectedvirtualinherited |
Can (optionally) be overridden to pre-initialize some member variables which are not PARAMETER::ADD'ed. Make sure that the overridden method BASE_CLASS::LOAD_SERIALIZABLE_PRE is called first.
ShogunException | will be thrown if an error occurs. |
Reimplemented in CDynamicArray< T >, CDynamicArray< float64_t >, CDynamicArray< float32_t >, CDynamicArray< int32_t >, CDynamicArray< char >, CDynamicArray< bool >, and CDynamicObjectArray.
Definition at line 454 of file SGObject.cpp.
|
virtualinherited |
Definition at line 295 of file SGObject.cpp.
|
inherited |
prints all parameters registered for model selection and their types
Definition at line 507 of file SGObject.cpp.
|
virtualinherited |
prints registered parameters out
prefix | prefix for members |
Definition at line 341 of file SGObject.cpp.
|
virtualinherited |
Connects each layer to the layer after it, i.e. connects layer i as input to layer i+1 for all i.
Definition at line 81 of file NeuralNetwork.cpp.
|
virtual |
Reconstructs the input data
data | Input features |
Reimplemented in CDeepAutoencoder.
Definition at line 147 of file Autoencoder.cpp.
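As an illustration, reconstruct can be used to compute a mean squared reconstruction error; in this sketch, ae is an already trained autoencoder and features is a CDenseFeatures<float64_t> object (both assumed to exist).

    CDenseFeatures<float64_t>* recon = ae->reconstruct(features);
    SGMatrix<float64_t> X = features->get_feature_matrix();
    SGMatrix<float64_t> R = recon->get_feature_matrix();

    // Mean squared error over all entries
    float64_t mse = 0;
    for (int32_t i = 0; i < X.num_rows * X.num_cols; i++)
        mse += (X.matrix[i] - R.matrix[i]) * (X.matrix[i] - R.matrix[i]);
    mse /= (X.num_rows * X.num_cols);
    SG_UNREF(recon);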
|
protectedinherited |
Registers a class parameter which is identified by a tag. This enables the parameter to be modified by set() and retrieved by get(). Parameters can be registered in the constructor of the class.
_tag | name and type information of parameter |
value | value of the parameter |
Definition at line 439 of file SGObject.h.
|
protectedinherited |
Registers a class parameter which is identified by a name. This enables the parameter to be modified by set() and retrieved by get(). Parameters can be registered in the constructor of the class.
name | name of the parameter |
value | value of the parameter along with type information |
Definition at line 452 of file SGObject.h.
|
virtualinherited |
Save this object to file.
file | where to save the object; will be closed during returning if PREFIX is an empty string. |
prefix | prefix for members |
Definition at line 347 of file SGObject.cpp.
|
protectedvirtualinherited |
Can (optionally) be overridden to post-initialize some member variables which are not PARAMETER::ADD'ed. Make sure that the overridden method BASE_CLASS::SAVE_SERIALIZABLE_POST is called first.
ShogunException | will be thrown if an error occurs. |
Reimplemented in CKernel.
Definition at line 469 of file SGObject.cpp.
|
protectedvirtualinherited |
Can (optionally) be overridden to pre-initialize some member variables which are not PARAMETER::ADD'ed. Make sure that the overridden method BASE_CLASS::SAVE_SERIALIZABLE_PRE is called first.
ShogunException | will be thrown if an error occurs. |
Reimplemented in CKernel, CDynamicArray< T >, CDynamicArray< float64_t >, CDynamicArray< float32_t >, CDynamicArray< int32_t >, CDynamicArray< char >, CDynamicArray< bool >, and CDynamicObjectArray.
Definition at line 464 of file SGObject.cpp.
|
inherited |
Setter for a class parameter, identified by a Tag. Throws an exception if the class does not have such a parameter.
_tag | name and type information of parameter |
value | value of the parameter |
Definition at line 328 of file SGObject.h.
|
inherited |
Setter for a class parameter, identified by a name. Throws an exception if the class does not have such a parameter.
name | name of the parameter |
value | value of the parameter along with type information |
Definition at line 354 of file SGObject.h.
|
protectedvirtualinherited |
Sets the batch size (the number of train/test cases) the network is expected to deal with. Allocates memory for the activations, local gradients, and input gradients if necessary (if the batch size is different from its previous value).
batch_size | number of train/test cases the network is expected to deal with. |
Definition at line 604 of file NeuralNetwork.cpp.
|
virtual |
Sets the contraction coefficient
For contractive autoencoders [Rifai, 2011], a term:
\[ \frac{\lambda}{N} \sum_{k=0}^{N-1} \left \| J(x_k) \right \|^2_F \]
is added to the error, where \( \left \| J(x_k)) \right \|^2_F \) is the Frobenius norm of the Jacobian of the activations of the hidden layer with respect to its inputs, \( N \) is the batch size, and \( \lambda \) is the contraction coefficient.
coeff | Contraction coefficient |
Reimplemented in CDeepAutoencoder.
Definition at line 158 of file Autoencoder.h.
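A contractive autoencoder is therefore obtained by calling this method before training, as in the sketch below (the layer type, sizes, and coefficient value are illustrative; features is assumed to be a CDenseFeatures<float64_t> object).

    CAutoencoder* cae = new CAutoencoder(64, new CNeuralLogisticLayer(16));
    cae->set_contraction_coefficient(10.0);   // the lambda in the penalty term above
    cae->train(features);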
|
inherited |
Sets the probability that a hidden layer neuron will be dropped out. When using this, the recommended value is 0.5. Default value is 0.0 (no dropout).
For more details on dropout, see paper [Hinton, 2012]
dropout_hidden | dropout probability |
Definition at line 286 of file NeuralNetwork.h.
|
inherited |
Sets the probability that an input layer neuron will be dropped out. When using this, a good value might be 0.2. Default value is 0.0 (no dropout).
For more details on dropout, see this paper [Hinton, 2012]
dropout_input | dropout probability |
Definition at line 306 of file NeuralNetwork.h.
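Taken together, the two dropout setters might be used as follows on an already constructed autoencoder ae; the probabilities simply follow the recommendations above.

    ae->set_dropout_hidden(0.5);   // drop hidden neurons with probability 0.5
    ae->set_dropout_input(0.2);    // drop input neurons with probability 0.2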
|
inherited |
Sets the convergence criterion: training stops when (E' - E)/E < epsilon, where E is the error at the current iteration and E' is the error at the previous iteration. Default value is 1.0e-5.
epsilon | convergence criteria |
Definition at line 340 of file NeuralNetwork.h.
|
inherited |
Sets the gradient descent error damping coefficient, used to damp the error fluctuations when stochastic gradient descent is used. Damping is done according to: error_damped(i) = c*error(i) + (1-c)*error_damped(i-1), where c is the damping coefficient.
If -1, the damping coefficient is automatically computed according to: c = 0.99*gd_mini_batch_size/training_set_size + 1e-2.
Default value is -1.
gd_error_damping_coeff | error damping coefficient |
Definition at line 449 of file NeuralNetwork.h.
|
inherited |
Sets the gradient descent learning rate. Default value is 0.1.
gd_learning_rate | gradient descent learning rate |
Definition at line 387 of file NeuralNetwork.h.
|
inherited |
Sets the gradient descent learning rate decay. The learning rate is updated at each iteration i according to: alpha(i) = decay*alpha(i-1). Default value is 1.0 (no decay).
gd_learning_rate_decay | gradient descent learning rate decay |
Definition at line 404 of file NeuralNetwork.h.
|
inherited |
Sets the size of the mini-batch used during gradient descent training. If 0, full-batch training is performed. Default value is 0.
gd_mini_batch_size | mini batch size |
Definition at line 372 of file NeuralNetwork.h.
|
inherited |
Sets the gradient descent momentum multiplier. Default value is 0.9.
For more details on momentum, see this paper [Sutskever, 2013]
gd_momentum | gradient descent momentum multiplier |
Definition at line 425 of file NeuralNetwork.h.
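A possible mini-batch gradient descent configuration combining these setters is sketched below; ae is an already constructed autoencoder, the numeric values are arbitrary, and NNOM_GRADIENT_DESCENT is assumed to be the non-LBFGS value of ENNOptimizationMethod.

    ae->set_optimization_method(NNOM_GRADIENT_DESCENT);
    ae->set_gd_mini_batch_size(32);
    ae->set_gd_learning_rate(0.01);
    ae->set_gd_learning_rate_decay(0.995);
    ae->set_gd_momentum(0.9);
    ae->set_max_num_epochs(200);
    ae->set_epsilon(1e-6);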
set_generic () specializations (inherited): Definitions at lines 74 to 144 of file SGObject.cpp.
|
inherited |
set generic type to T
|
inherited |
|
inherited |
set the parallel object
parallel | parallel object to use |
Definition at line 274 of file SGObject.cpp.
|
inherited |
set the version object
version | version object to use |
Definition at line 316 of file SGObject.cpp.
|
inherited |
Sets the L1 regularization coefficient. Default value is 0.0.
l1_coefficient | l1_coefficient |
Definition at line 266 of file NeuralNetwork.h.
|
inherited |
Sets the L2 regularization coefficient. Default value is 0.0.
l2_coefficient | l2_coefficient |
Definition at line 252 of file NeuralNetwork.h.
|
virtualinherited |
set labels
lab | labels |
Reimplemented from CMachine.
Definition at line 696 of file NeuralNetwork.cpp.
|
virtualinherited |
Sets the layers of the network
layers | An array of CNeuralLayer objects specifying the layers of the network. Must contain at least one input layer. The last layer in the array is treated as the output layer |
Definition at line 55 of file NeuralNetwork.cpp.
|
inherited |
Sets the maximum allowable L2 norm for a neuron's weights. When using this, a good value might be 15. Default value is -1 (max-norm regularization disabled).
max_norm | maximum allowable L2 norm |
Definition at line 322 of file NeuralNetwork.h.
|
inherited |
Sets the maximum number of iterations over the training set. If 0, training will continue until convergence. Default value is 0.
max_num_epochs | maximum number of iterations over the training set |
Definition at line 356 of file NeuralNetwork.h.
|
inherited |
set maximum training time
t | maximum training time |
Definition at line 82 of file Machine.cpp.
void set_noise_parameter (float64_t noise_parameter)
Sets the noise parameter, which controls the strength of the noise, depending on noise_type.
noise_parameter | controls the strength of noise |
Definition at line 195 of file Autoencoder.h.
void set_noise_type (EAENoiseType noise_type)
Sets noise type for denoising autoencoders.
If set to AENT_DROPOUT, inputs are randomly set to zero during each iteration of training with probability noise_parameter.
If set to AENT_GAUSSIAN, gaussian noise with zero mean and noise_parameter standard deviation is added to the inputs.
Default value is AENT_NONE
noise_type | noise type for denoising autoencoders |
Definition at line 179 of file Autoencoder.h.
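Putting the noise settings together, a denoising autoencoder might be set up as sketched below. As noted at the top of this page, gradient descent should be used instead of L-BFGS when noise is enabled; NNOM_GRADIENT_DESCENT is assumed to be the corresponding ENNOptimizationMethod value, and the layer type and numbers are illustrative.

    CAutoencoder* dae = new CAutoencoder(64, new CNeuralLogisticLayer(16));
    dae->set_noise_type(AENT_DROPOUT);      // randomly zero out inputs...
    dae->set_noise_parameter(0.3);          // ...with probability 0.3
    dae->set_optimization_method(NNOM_GRADIENT_DESCENT);
    dae->train(features);                   // features: CDenseFeatures<float64_t>*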
|
inherited |
Sets the optimization method. Default is NNOM_LBFGS.
optimization_method | optimization method |
Definition at line 238 of file NeuralNetwork.h.
|
inherited |
|
virtualinherited |
Setter for store-model-features-after-training flag
store_model | whether model should be stored after training |
Definition at line 107 of file Machine.cpp.
|
virtualinherited |
A shallow copy. All the SGObject instance variables will be simply assigned and SG_REF-ed.
Reimplemented in CGaussianKernel.
Definition at line 225 of file SGObject.cpp.
|
protectedvirtualinherited |
Stores feature data of underlying model. After this method has been called, it is possible to change the machine's feature data and call apply(), which is then performed on the training feature data that is part of the machine's model.
Base method, has to be implemented in order to allow cross-validation and model selection.
NOT IMPLEMENTED! Has to be done in subclasses
Reimplemented in CKernelMachine, CKNN, CLinearMachine, CLinearMulticlassMachine, CKMeansBase, CTreeMachine< T >, CTreeMachine< ConditionalProbabilityTreeNodeData >, CTreeMachine< RelaxedTreeNodeData >, CTreeMachine< id3TreeNodeData >, CTreeMachine< VwConditionalProbabilityTreeNodeData >, CTreeMachine< CARTreeNodeData >, CTreeMachine< C45TreeNodeData >, CTreeMachine< CHAIDTreeNodeData >, CTreeMachine< NbodyTreeNodeData >, CGaussianProcessMachine, CHierarchical, CDistanceMachine, CKernelMulticlassMachine, and CLinearStructuredOutputMachine.
|
virtualinherited |
Reimplemented in CKernelMachine.
|
virtual |
Trains the autoencoder
data | Training examples |
Reimplemented from CMachine.
Definition at line 96 of file Autoencoder.cpp.
|
protectedvirtualinherited |
trains the network using gradient descent
Definition at line 261 of file NeuralNetwork.cpp.
|
protectedvirtualinherited |
trains the network using L-BFGS
Definition at line 357 of file NeuralNetwork.cpp.
Trains a locked machine on a set of indices. Error if machine is not locked
NOT IMPLEMENTED
indices | index vector (of locked features) that is used for training |
Reimplemented in CKernelMachine.
|
protectedvirtualinherited |
|
protectedvirtualinherited |
returns whether the machine requires labels for training
Reimplemented in COnlineLinearMachine, CKMeansBase, CHierarchical, CLinearLatentMachine, CVwConditionalProbabilityTree, CConditionalProbabilityTree, and CLibSVMOneClass.
|
virtual |
Computes the activation of the hidden layer given the input data
data | Input features |
Reimplemented from CNeuralNetwork.
Reimplemented in CDeepAutoencoder.
Definition at line 140 of file Autoencoder.cpp.
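In practice, transform is how the learned hidden code is extracted for downstream use; a minimal sketch follows, assuming ae is trained and features exists.

    CDenseFeatures<float64_t>* code = ae->transform(features);
    SGMatrix<float64_t> H = code->get_feature_matrix();   // num_hidden_neurons x num_vectors
    // ... use H directly, or pass code to another machine ...
    SG_UNREF(code);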
|
inherited |
unset generic type
this has to be called in classes specializing a template class
Definition at line 336 of file SGObject.cpp.
|
virtualinherited |
Updates the hash of current parameter combination
Definition at line 281 of file SGObject.cpp.
|
inherited |
io
Definition at line 537 of file SGObject.h.
|
protectedinherited |
Describes the connections in the network: if there's a connection from layer i to layer j then m_adj_matrix(i,j) = 1.
Definition at line 596 of file NeuralNetwork.h.
|
protectedinherited |
number of train/test cases the network is expected to deal with. Default value is 1
Definition at line 618 of file NeuralNetwork.h.
|
protected |
For contractive autoencoders [Rifai, 2011], a term:
\[ \frac{\lambda}{N} \sum_{k=0}^{N-1} \left \| J(x_k) \right \|^2_F \]
is added to the error, where \( \left \| J(x_k)) \right \|^2_F \) is the Frobenius norm of the Jacobian of the activations of the hidden layer with respect to its inputs, \( N \) is the batch size, and \( \lambda \) is the contraction coefficient.
Default value is 0.0.
Definition at line 232 of file Autoencoder.h.
|
protectedinherited |
|
protectedinherited |
Probability that a hidden layer neuron will be dropped out. When using this, the recommended value is 0.5.
default value 0.0 (no dropout)
For more details on dropout, see paper [Hinton, 2012]
Definition at line 642 of file NeuralNetwork.h.
|
protectedinherited |
Probability that an input layer neuron will be dropped out. When using this, a good value might be 0.2.
default value 0.0 (no dropout)
For more details on dropout, see this paper [Hinton, 2012]
Definition at line 652 of file NeuralNetwork.h.
|
protectedinherited |
Convergence criterion: training stops when (E' - E)/E < epsilon, where E is the error at the current iteration and E' is the error at the previous iteration. Default value is 1.0e-5.
Definition at line 667 of file NeuralNetwork.h.
|
protectedinherited |
Used to damp the error fluctuations when stochastic gradient descent is used. Damping is done according to: error_damped(i) = c*error(i) + (1-c)*error_damped(i-1), where c is the damping coefficient.
If -1, the damping coefficient is automatically computed according to: c = 0.99*gd_mini_batch_size/training_set_size + 1e-2.
Default value is -1.
Definition at line 711 of file NeuralNetwork.h.
|
protectedinherited |
Gradient descent learning rate. Default value is 0.1.
Definition at line 682 of file NeuralNetwork.h.
|
protectedinherited |
Gradient descent learning rate decay. The learning rate is updated at each iteration i according to: alpha(i) = decay*alpha(i-1). Default value is 1.0 (no decay).
Definition at line 689 of file NeuralNetwork.h.
|
protectedinherited |
Size of the mini-batch used during gradient descent training. If 0, full-batch training is performed. Default value is 0.
Definition at line 679 of file NeuralNetwork.h.
|
protectedinherited |
gradient descent momentum multiplier
default value is 0.9
For more details on momentum, see this paper [Sutskever, 2013]
Definition at line 699 of file NeuralNetwork.h.
|
inherited |
parameters wrt which we can compute gradients
Definition at line 552 of file SGObject.h.
|
inherited |
Hash of parameter values
Definition at line 555 of file SGObject.h.
|
protectedinherited |
Offsets specifying where each layer's parameters and parameter gradients are stored, i.e. layer i's parameters are stored at m_params + m_index_offsets[i].
Definition at line 613 of file NeuralNetwork.h.
|
protectedinherited |
True if the network is currently being trained. Initial value is false.
Definition at line 623 of file NeuralNetwork.h.
|
protectedinherited |
L1 Regularization coeff, default value is 0.0
Definition at line 632 of file NeuralNetwork.h.
|
protectedinherited |
L2 Regularization coeff, default value is 0.0
Definition at line 629 of file NeuralNetwork.h.
|
protectedinherited |
network's layers
Definition at line 591 of file NeuralNetwork.h.
|
protectedinherited |
Maximum allowable L2 norm for a neuron's weights. When using this, a good value might be 15.
default value -1 (max-norm regularization disabled)
Definition at line 659 of file NeuralNetwork.h.
|
protectedinherited |
Maximum number of iterations over the training set. If 0, training will continue until convergence. Default value is 0.
Definition at line 673 of file NeuralNetwork.h.
|
protectedinherited |
|
inherited |
model selection parameters
Definition at line 549 of file SGObject.h.
|
protected |
Controls the strength of the noise, depending on noise_type
Definition at line 247 of file Autoencoder.h.
|
protected |
Noise type for denoising autoencoders.
If set to AENT_DROPOUT, inputs are randomly set to zero during each iteration of training with probability noise_parameter.
If set to AENT_GAUSSIAN, gaussian noise with zero mean and noise_parameter standard deviation is added to the inputs.
Default value is AENT_NONE
Definition at line 244 of file Autoencoder.h.
|
protectedinherited |
number of neurons in the input layer
Definition at line 585 of file NeuralNetwork.h.
|
protectedinherited |
number of layers
Definition at line 588 of file NeuralNetwork.h.
|
protectedinherited |
Optimization method, default is NNOM_LBFGS
Definition at line 626 of file NeuralNetwork.h.
|
protectedinherited |
Array that specifies which parameters are to be regularized. This is used to turn off regularization for bias parameters
Definition at line 607 of file NeuralNetwork.h.
|
inherited |
parameters
Definition at line 546 of file SGObject.h.
array where all the parameters of the network are stored
Definition at line 602 of file NeuralNetwork.h.
|
protectedinherited |
|
protectedinherited |
|
protectedinherited |
total number of parameters in the network
Definition at line 599 of file NeuralNetwork.h.
|
inherited |
parallel
Definition at line 540 of file SGObject.h.
|
inherited |
version
Definition at line 543 of file SGObject.h.