This page lists ready-to-run Shogun examples for the Static Python interface.
To run an example, issue:
python name_of_example.py
# In this example a multi-class support vector machine is trained on a toy data
# set and the trained classifier is used to predict labels of test examples.
# The training algorithm is based on the BSVM formulation (L2-soft margin
# and the bias added to the objective function), which is solved by the Improved
# Mitchell-Demyanov-Malozemov algorithm. The training algorithm uses a Gaussian
# kernel of width 2.1 and the regularization constant C=1.2. The bias term of the
# classification rule is not used. The solver stops when the relative duality gap
# falls below 1e-5, and it uses a 10MB kernel cache.
#
# For more details on the SVM solver used see
# V. Franc: Optimization Algorithms for Kernel Methods. Research report.
# CTU-CMP-2005-22. CTU FEL Prague. 2005.
# ftp://cmp.felk.cvut.cz/pub/cmp/articles/franc/Franc-PhD.pdf .
#
def gmnpsvm ():
print 'GMNPSVM'
size_cache=10
width=2.1
C=1.2
epsilon=1e-5
use_bias=False
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train_multiclass)
sg('new_classifier', 'GMNPSVM')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
label_train_multiclass=lm.load_labels('../data/label_train_multiclass.dat')
gmnpsvm()
# In this example a two-class support vector machine classifier is trained on a
# toy data set and the trained classifier is used to predict labels of test
# examples. As training algorithm the Gradient Projection Decomposition Technique
# (GPDT) is used with SVM regularization parameter C=1.2, a Gaussian kernel of
# width 2.1, and a 10MB kernel cache.
#
# For more details on the GPDT solver see http://dm.unife.it/gpdt
#
def gpbtsvm ():
print 'GPBTSVM'
size_cache=10
width=2.1
C=1.2
epsilon=1e-5
use_bias=False
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train_twoclass)
sg('new_classifier', 'GPBTSVM')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
label_train_twoclass=lm.load_labels('../data/label_train_twoclass.dat')
gpbtsvm()
# This example shows the usage of the k-nearest neighbor (KNN) classification rule on
# a toy data set. The number of nearest neighbors is set to k=3 and the distances
# are measured by the Euclidean metric. Finally, the KNN rule is applied to predict
# labels of test examples.
def knn ():
print 'KNN'
k=3
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_labels', 'TRAIN', label_train_multiclass)
sg('set_distance', 'EUCLIDIAN', 'REAL')
sg('new_classifier', 'KNN')
sg('train_classifier', k)
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
label_train_multiclass=lm.load_labels('../data/label_train_multiclass.dat')
knn()
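# As a cross-check on the KNN rule described above, here is a minimal NumPy
# sketch of k-nearest-neighbor prediction. It is an illustration only, not
# shogun's implementation; the names (knn_predict, train, labels, test) are
# made up for this sketch, and data points are columns as in the example above.
import numpy as np

def knn_predict(train, labels, test, k=3):
    # squared Euclidean distances between every test and train column
    d2 = ((test[:, :, None] - train[:, None, :])**2).sum(axis=0)
    nearest = np.argsort(d2, axis=1)[:, :k]  # indices of the k closest train points
    def majority(v):
        vals, counts = np.unique(v, return_counts=True)
        return vals[counts.argmax()]
    return np.array([majority(labels[idx]) for idx in nearest])

X = np.random.randn(2, 20)
y = (X[0] > 0)*2 - 1
print(knn_predict(X, y, X[:, :5], k=3))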
# In this example a linear two-class classifier is trained using Linear
# Discriminant Analysis (LDA) on toy 2-dimensional examples. The trained
# LDA classifier is used to predict test examples. Note that the LDA classifier
# is optimal under the assumption that both classes are Gaussian distributed with equal
# covariance. For more details on LDA see e.g.
# http://en.wikipedia.org/wiki/Linear_discriminant_analysis
#
def lda ():
print 'LDA'
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_labels', 'TRAIN', label_train_twoclass)
sg('new_classifier', 'LDA')
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
print result
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
label_train_twoclass=lm.load_labels('../data/label_train_twoclass.dat')
lda()
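# Under the equal-covariance Gaussian assumption mentioned above, the LDA weight
# vector has the closed form w = Sigma^-1 (mu1 - mu0). The following NumPy sketch
# illustrates that formula; it is not shogun's implementation, the names
# (lda_train, X, y) are made up, and labels are assumed to be -1/+1.
import numpy as np

def lda_train(X, y):
    # X: one example per column; y: labels in {-1, +1}
    mu0 = X[:, y < 0].mean(axis=1)
    mu1 = X[:, y > 0].mean(axis=1)
    # pooled covariance of the class-centered data
    R = np.hstack((X[:, y < 0] - mu0[:, None], X[:, y > 0] - mu1[:, None]))
    Sigma = R.dot(R.T) / (X.shape[1] - 2)
    w = np.linalg.solve(Sigma, mu1 - mu0)  # w = Sigma^-1 (mu1 - mu0)
    b = -0.5 * w.dot(mu0 + mu1)            # threshold, assuming equal class priors
    return w, b

X = np.random.randn(2, 100)
y = (X[0] + X[1] > 0)*2 - 1
w, b = lda_train(X, y)
print((np.sign(w.dot(X) + b) == y).mean())  # training accuracy of the sketch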
# In this example a two-class support vector machine classifier is trained on a
# toy data set and the trained classifier is used to predict labels of test
# examples. As training algorithm LIBSVM is used with SVM regularization
# parameter C=1.2, a Gaussian kernel of width 2.1, a 10MB kernel cache and
# the precision parameter epsilon=1e-5.
#
# For more details on LIBSVM solver see http://www.csie.ntu.edu.tw/~cjlin/libsvm/
def libsvm ():
print 'LibSVM'
size_cache=10
width=2.1
C=1.2
epsilon=1e-5
use_bias=False
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train_twoclass)
sg('new_classifier', 'LIBSVM')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
label_train_twoclass=lm.load_labels('../data/label_train_twoclass.dat')
libsvm()
# In this example a multi-class support vector machine classifier is trained on a
# toy data set and the trained classifier is used to predict labels of test
# examples. As training algorithm LIBSVM is used with SVM regularization
# parameter C=10, the bias in the classification rule switched off,
# a Gaussian kernel of width 2.1, a 10MB kernel cache and the precision
# parameter epsilon=1e-5.
#
# For more details on LIBSVM solver see http://www.csie.ntu.edu.tw/~cjlin/libsvm/
def libsvm_multiclass ():
print 'LibSVMMultiClass'
size_cache=10
width=2.1
C=10.
epsilon=1e-5
use_bias=False
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train_multiclass)
sg('new_classifier', 'LIBSVM_MULTICLASS')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
label_train_multiclass=lm.load_labels('../data/label_train_multiclass.dat')
libsvm_multiclass()
# In this example a one-class support vector machine classifier is trained on a
# toy data set. The training algorithm finds a hyperplane in the RKHS which
# separates the training data from the origin. The one-class classifier is
# typically used to estimate the support of a high-dimensional distribution.
# For more details see e.g.
# B. Schoelkopf et al. Estimating the support of a high-dimensional
# distribution. Neural Computation, 13, 2001, 1443-1471.
#
# In the example, the one-class SVM is trained by the LIBSVM solver with the
# regularization parameter C=10, a Gaussian kernel of width 2.1, the
# precision parameter epsilon=1e-5 and a 10MB kernel cache.
#
# For more details on LIBSVM solver see http://www.csie.ntu.edu.tw/~cjlin/libsvm/ .
#
#
def libsvm_oneclass ():
print 'LibSVMOneClass'
size_cache=10
width=2.1
C=10.
epsilon=1e-5
use_bias=False
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('new_classifier', 'LIBSVM_ONECLASS')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
libsvm_oneclass()
# In this example a two-class support vector machine classifier is trained on a
# toy data set and the trained classifier is used to predict labels of test
# examples. As training algorithm the Minimal Primal Dual SVM is used with SVM
# regularization parameter C=1.2, a Gaussian kernel of width 2.1, a 10MB
# kernel cache and the precision parameter epsilon=1e-5.
#
# For more details on the MPD solver see
# Kienzle, W. and B. Schölkopf: Training Support Vector Machines with Multiple
# Equality Constraints. Machine Learning: ECML 2005, 182-193. (Eds.) Carbonell,
# J. G., J. Siekmann, Springer, Berlin, Germany (11 2005)
def mpdsvm ():
print 'MPDSVM'
size_cache=10
width=2.1
C=1.2
epsilon=1e-5
use_bias=False
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train_twoclass)
sg('new_classifier', 'MPDSVM')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
label_train_twoclass=lm.load_labels('../data/label_train_twoclass.dat')
mpdsvm()
# This example shows how to use the Perceptron algorithm for training a
# two-class linear classifier, i.e. y = sign( <x,w>+b). The Perceptron algorithm
# works by iteratively passing through the training examples and applying the
# update rule to those examples which are misclassified by the current
# classifier. The Perceptron update rule reads
#
# w(t+1) = w(t) + alpha * y_t * x_t
# b(t+1) = b(t) + alpha * y_t
#
# where (x_t,y_t) are the feature vector and label (must be +1/-1) of the misclassified example,
# (w(t),b(t)) are the current parameters of the linear classifier,
# (w(t+1),b(t+1)) are the new parameters of the linear classifier, and
# alpha is the learning rate.
#
# The Perceptron algorithm iterates until all training examples are correctly
# classified or the prescribed maximal number of iterations is reached.
#
# The learning rate and the maximal number of iterations can be set by
# sg('set_perceptron_parameters', alpha, max_iter);
#
def perceptron ():
print 'Perceptron'
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_labels', 'TRAIN', label_train_twoclass)
sg('new_classifier', 'PERCEPTRON')
# often does not converge, mind your data!
#sg('train_classifier')
#sg('set_features', 'TEST', fm_test_real)
#result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
label_train_twoclass=lm.load_labels('../data/label_train_twoclass.dat')
perceptron()
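# The update rule quoted above is simple enough to state in a few lines of
# NumPy. This is a hedged illustration of the textbook Perceptron, not shogun's
# implementation; the names (perceptron_train, alpha, max_iter) are made up for
# this sketch. As with the commented-out shogun call, it only terminates early
# if the data is linearly separable.
import numpy as np

def perceptron_train(X, y, alpha=1.0, max_iter=1000):
    # X: one example per column; y: labels in {-1, +1}
    w = np.zeros(X.shape[0])
    b = 0.0
    for _ in range(max_iter):
        mistakes = 0
        for t in range(X.shape[1]):
            if y[t] * (w.dot(X[:, t]) + b) <= 0:  # misclassified example
                w += alpha * y[t] * X[:, t]       # w(t+1) = w(t) + alpha * y_t * x_t
                b += alpha * y[t]                 # b(t+1) = b(t) + alpha * y_t
                mistakes += 1
        if mistakes == 0:  # all training examples correctly classified
            break
    return w, b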
# In this example a two-class support vector machine classifier is trained on a
# DNA splice-site detection data set and the trained classifier is used to predict
# labels on the test set. As training algorithm SVM^light is used with SVM
# regularization parameter C=1.2, the Weighted Degree kernel of degree 20 and
# the precision parameter epsilon=1e-5.
#
# For more details on the SVM^light see
# T. Joachims. Making large-scale SVM learning practical. In Advances in Kernel
# Methods -- Support Vector Learning, pages 169-184. MIT Press, Cambridge, MA USA, 1999.
#
# For more details on the Weighted Degree kernel see
# G. Raetsch, S. Sonnenburg, and B. Schoelkopf. RASE: recognition of alternatively
# spliced exons in C. elegans. Bioinformatics, 21:369-377, June 2005.
def svm_light ():
print 'SVMLight'
size_cache=10
degree=20
C=1.2
epsilon=1e-5
use_bias=False
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_kernel', 'WEIGHTEDDEGREE', 'CHAR', size_cache, degree)
sg('set_labels', 'TRAIN', label_train_dna)
try:
sg('new_classifier', 'SVMLIGHT')
except RuntimeError:
return
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
label_train_dna=lm.load_labels('../data/label_train_dna.dat')
svm_light()
# In this example an agglomerative hierarchical single linkage clustering method
# is used to cluster a given toy data set. Starting with each object assigned
# to its own cluster, clusters are iteratively merged. In each step the two
# clusters whose closest elements have the minimum distance (here measured via
# the Euclidean distance object) are merged.
def hierarchical ():
print 'Hierarchical'
size_cache=10
merges=3
from sg import sg
sg('set_features', 'TRAIN', fm_train)
sg('set_distance', 'EUCLIDIAN', 'REAL')
sg('new_clustering', 'HIERARCHICAL')
sg('train_clustering', merges)
[merge_distance, pairs]=sg('get_clustering')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train=lm.load_numbers('../data/fm_train_real.dat')
hierarchical()
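# The same single-linkage merging strategy is available in SciPy, which makes a
# handy cross-check. This is a hedged sketch assuming SciPy is installed; it is
# not part of the shogun examples. Note the transpose: shogun stores one point
# per column, while scipy.cluster.hierarchy.linkage expects one point per row.
import numpy as np
from scipy.cluster.hierarchy import linkage

X = np.random.rand(2, 30)            # 30 points in 2 dimensions, one per column
Z = linkage(X.T, method='single')    # agglomerative single-linkage merges
# each row of Z lists the two merged clusters, their distance and the new size
print(Z[:3])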
# In this example the k-means clustering method is used to cluster a given toy
# data set. In k-means clustering one tries to partition n observations into k
# clusters in which each observation belongs to the cluster with the nearest mean.
# The algorithm class constructor takes the number of clusters and a distance to
# be used as input. The distance used in this example is the Euclidean distance.
# After training one can fetch the result of clustering by obtaining the cluster
# centers and their radii.
def kmeans ():
print 'KMeans'
size_cache=10
k=3
iter=1000
from sg import sg
sg('set_features', 'TRAIN', fm_train)
sg('set_distance', 'EUCLIDIAN', 'REAL')
sg('new_clustering', 'KMEANS')
sg('train_clustering', k, iter)
[radi, centers]=sg('get_clustering')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train=lm.load_numbers('../data/fm_train_real.dat')
kmeans()
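# For reference, the alternating assign/update iteration that k-means performs
# can be written out in a few lines of NumPy. This is an illustration only, not
# shogun's implementation; the names (kmeans, iters) are made up, and points are
# columns as in the example above.
import numpy as np

def kmeans(X, k=3, iters=100):
    rng = np.random.RandomState(0)
    centers = X[:, rng.choice(X.shape[1], k, replace=False)]  # random init
    for _ in range(iters):
        d2 = ((X[:, None, :] - centers[:, :, None])**2).sum(axis=0)  # (k, n)
        assign = d2.argmin(axis=0)       # nearest center for every point
        for j in range(k):               # move each center to its cluster mean
            if (assign == j).any():
                centers[:, j] = X[:, assign == j].mean(axis=1)
    return centers, assign

centers, assign = kmeans(np.random.rand(2, 50))
print(centers)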
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'BRAYCURTIS'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance matrix is computed by
# 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix' and
# target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance matrix between
# these two matrices is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CBrayCurtisDistance.html.
#
# Obviously, using the Bray Curtis distance is not limited to this showcase
# example.
def bray_curtis_distance ():
print 'BrayCurtisDistance'
from sg import sg
sg('set_distance', 'BRAYCURTIS', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
bray_curtis_distance()
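# For reference, the Bray-Curtis distance is commonly defined as
# d(x,y) = sum_i |x_i - y_i| / sum_i |x_i + y_i|. The NumPy sketch below shows
# what a pairwise distance matrix of that form looks like; it is an illustration
# under this common definition (see the class documentation above for the
# authoritative one), with made-up names, and it assumes the denominator is
# never zero.
import numpy as np

def bray_curtis_matrix(A, B):
    # A, B: one data point per column; entry (i, j) is d(A[:, i], B[:, j])
    num = np.abs(A[:, :, None] - B[:, None, :]).sum(axis=0)
    den = np.abs(A[:, :, None] + B[:, None, :]).sum(axis=0)
    return num / den

X = np.random.rand(3, 5)
print(bray_curtis_matrix(X, X))  # analogue of the 'TRAIN' distance matrix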
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'CANBERRA'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance (dissimilarity ratio) matrix is
# computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance (dissimilarity ratio)
# matrix between these two data sets is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix' and
# target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CCanberraMetric.html.
#
# Obviously, using the Canberra distance is not limited to this showcase
# example.
def canberra_metric ():
print 'CanberraMetric'
from sg import sg
sg('set_distance', 'CANBERRA', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
canberra_metric()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored data sets in 'STRING' representation
# (feature type 'CHAR' with alphabet 'DNA') from different files and
# initializes the distance to 'CANBERRA' with feature type 'WORD'.
#
# Data points in this example are defined by the transformation function
# 'convert' and the preprocessing step applied afterwards (defined by
# 'add_preproc' and preprocessor 'SORTWORDSTRING').
#
# The target 'TRAIN' for 'set_features' controls the binding of the given
# data points. In order to compute a pairwise distance matrix by
# 'get_distance_matrix', we have to perform two preprocessing steps for
# input data 'TRAIN'. The method 'convert' transforms the input data to
# a string representation suitable for the selected distance. The individual
# strings are sorted in ascending order after the execution of 'attach_preproc'.
# A pairwise distance matrix is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the binding of the given
# data points 'TRAIN' and 'TEST'. In order to compute a pairwise distance
# matrix between these two data sets by 'get_distance_matrix', we have to
# perform two preprocessing steps for input data 'TEST'. The method 'convert'
# transforms the input data 'TEST' to a string representation suitable for
# the selected distance. The individual strings are sorted in ascending order
# after the execution of 'attach_preproc'. A pairwise distance matrix between
# the data sets 'TRAIN' and 'TEST' is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see
# doc/classshogun_1_1CSortWordString.html,
# doc/classshogun_1_1CPreProc.html,
# doc/classshogun_1_1CStringFeatures.html (method obtain_from_char_features) and
# doc/classshogun_1_1CCanberraWordDistance.html.
#
# Obviously, using the Canberra word distance is not limited to this showcase
# example.
def canberra_word_distance ():
print 'CanberraWordDistance'
order=3
gap=0
reverse='n' # the interface expects a char rather than a boolean; use 'r' to reverse
from sg import sg
sg('set_distance', 'CANBERRA', 'WORD')
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
canberra_word_distance()
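# The convert/attach_preproc pipeline described above boils down to sliding a
# window of length 'order' over each string, encoding every k-mer as an integer
# ("word") and sorting the resulting list. The following sketch mimics those two
# steps in plain Python; it is an illustration, not shogun's implementation, and
# the function name is made up.
def dna_to_sorted_kmers(s, order=3):
    # encode each length-'order' substring as a base-4 integer over 'ACGT'
    code = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    kmers = [sum(code[c] * 4**i for i, c in enumerate(s[p:p + order]))
             for p in range(len(s) - order + 1)]
    return sorted(kmers)  # sorting mirrors the SORTWORDSTRING preprocessor

print(dna_to_sorted_kmers('ACGTACGT'))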
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'CHEBYSHEW'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance matrix (maximum of absolute feature
# dimension differences) is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance matrix (maximum
# of absolute feature dimension differences) between these two data sets is
# computed.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CChebyshewMetric.html.
#
# Obviously, using the Chebyshev distance is not limited to this showcase
# example.
def chebyshew_metric ():
print 'ChebyshewMetric'
from sg import sg
sg('set_distance', 'CHEBYSHEW', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
chebyshew_metric()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'CHISQUARE'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance matrix is computed by
# 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance matrix between
# these two matrices is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CChiSquareDistance.html.
#
# Obviously, using the ChiSquare distance is not limited to this showcase
# example.
def chi_square_distance ():
print 'ChiSquareDistance'
from sg import sg
sg('set_distance', 'CHISQUARE', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
chi_square_distance()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'COSINE'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance matrix is computed by
# 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix' and
# target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance matrix between
# these two data sets is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CCosineDistance.html.
#
# Obviously, using the Cosine distance is not limited to this showcase
# example.
def cosine_distance ():
print 'CosineDistance'
from sg import sg
sg('set_distance', 'COSINE', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
cosine_distance()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'EUCLIDIAN'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance matrix is computed by
# 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix' and
# target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance matrix between
# these two data sets is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CEuclidianDistance.html.
#
# Obviously, using the Euclidean distance is not limited to this showcase
# example.
def euclidian_distance ():
print 'EuclidianDistance'
from sg import sg
sg('set_distance', 'EUCLIDIAN', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
euclidian_distance()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'GEODESIC'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance (shortest path on a sphere) matrix is
# computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix' and
# target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance (shortest path on
# a sphere) matrix between these two data sets is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CGeodesicMetric.html.
#
# Obviously, using the Geodesic distance is not limited to this showcase
# example.
def geodesic_metric ():
print 'GeodesicMetric'
from sg import sg
sg('set_distance', 'GEODESIC', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
geodesic_metric()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored data sets in 'STRING' representation
# (feature type 'CHAR' with alphabet 'DNA') from different files and
# initializes the distance to 'HAMMING' with feature type 'WORD'.
#
# Data points in this example are defined by the transformation function
# 'convert' and the preprocessing step applied afterwards (defined by
# 'add_preproc' and preprocessor 'SORTWORDSTRING').
#
# The target 'TRAIN' for 'set_features' controls the binding of the given
# data points. In order to compute a pairwise distance matrix by
# 'get_distance_matrix', we have to perform two preprocessing steps for
# input data 'TRAIN'. The method 'convert' transforms the input data to
# a string representation suitable for the selected distance. The individual
# strings are sorted in ascending order after the execution of 'attach_preproc'.
# A pairwise distance matrix is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the binding of the given
# data points 'TRAIN' and 'TEST'. In order to compute a pairwise distance
# matrix between these two data sets by 'get_distance_matrix', we have to
# perform two preprocessing steps for input data 'TEST'. The method 'convert'
# transforms the input data 'TEST' to a string representation suitable for
# the selected distance. The individual strings are sorted in ascending order
# after the execution of 'attach_preproc'. A pairwise distance matrix between
# the data sets 'TRAIN' and 'TEST' is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see
# doc/classshogun_1_1CSortWordString.html,
# doc/classshogun_1_1CPreProc.html,
# doc/classshogun_1_1CStringFeatures.html (method obtain_from_char_features) and
# doc/classshogun_1_1CHammingWordDistance.html.
#
# Obviously, using the Hamming word distance is not limited to this showcase
# example.
def hamming_word_distance ():
print 'HammingWordDistance'
order=3
gap=0
reverse='n' # the interface expects a char rather than a boolean; use 'r' to reverse
from sg import sg
sg('set_distance', 'HAMMING', 'WORD')
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
hamming_word_distance()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'JENSEN'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance (divergence measure based on the
# Kullback-Leibler divergence) matrix is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix' and
# target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance (divergence measure
# based on the Kullback-Leibler divergence) matrix between these two data sets
# is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CJensenMetric.html.
#
# Obviously, using the Jensen-Shannon distance/divergence is not limited to
# this showcase example.
def jensen_metric ():
print 'JensenMetric'
from sg import sg
sg('set_distance', 'JENSEN', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
jensen_metric()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'MANHATTAN'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance (sum of absolute feature
# dimension differences) matrix is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix' and
# target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance (sum of absolute
# feature dimension differences) matrix between these two data sets is
# computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CManhattanMetric.html.
#
# Obviously, using the Manhattan distance is not limited to this showcase
# example.
def manhattan_metric ():
print 'ManhattanMetric'
from sg import sg
sg('set_distance', 'MANHATTAN', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
manhattan_metric()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored data sets in 'STRING' representation
# (feature type 'CHAR' with alphabet 'DNA') from different files and
# initializes the distance to 'MANHATTAN' with feature type 'WORD'.
#
# Data points in this example are defined by the transformation function
# 'convert' and the preprocessing step applied afterwards (defined by
# 'add_preproc' and preprocessor 'SORTWORDSTRING').
#
# The target 'TRAIN' for 'set_features' controls the binding of the given
# data points. In order to compute a pairwise distance matrix by
# 'get_distance_matrix', we have to perform two preprocessing steps for
# input data 'TRAIN'. The method 'convert' transforms the input data to
# a string representation suitable for the selected distance. The individual
# strings are sorted in ascending order after the execution of 'attach_preproc'.
# A pairwise distance matrix is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the binding of the given
# data points 'TRAIN' and 'TEST'. In order to compute a pairwise distance
# matrix between these two data sets by 'get_distance_matrix', we have to
# perform two preprocessing steps for input data 'TEST'. The method 'convert'
# transforms the input data 'TEST' to a string representation suitable for
# the selected distance. The individual strings are sorted in ascending order
# after the execution of 'attach_preproc'. A pairwise distance matrix between
# the data sets 'TRAIN' and 'TEST' is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see
# doc/classshogun_1_1CSortWordString.html,
# doc/classshogun_1_1CPreProc.html,
# doc/classshogun_1_1CStringFeatures.html (method obtain_from_char_features) and
# doc/classshogun_1_1CManhattanWordDistance.html.
#
# Obviously, using the Manhattan word distance is not limited to this showcase
# example.
def manhattan_word_distance ():
print 'ManhattanWordDistance'
order=3
gap=0
reverse='n' # the interface expects a char rather than a boolean; use 'r' to reverse
from sg import sg
sg('set_distance', 'MANHATTAN', 'WORD')
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
manhattan_word_distance()
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'MINKOWSKI' with
# norm 'k'. Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance matrix is computed by
# 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix' and
# target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance matrix between
# these two data sets is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CMinkowskiMetric.html.
#
# Obviously, using the Minkowski metric is not limited to this showcase
# example.
def minkowski_metric ():
print 'MinkowskiMetric'
k=3.
from sg import sg
sg('set_distance', 'MINKOWSKI', 'REAL', k)
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
minkowski_metric()
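# For reference, the Minkowski distance with norm parameter k is
# d(x,y) = (sum_i |x_i - y_i|^k)^(1/k); k=2 recovers the Euclidean and k=1 the
# Manhattan distance. Below is a NumPy sketch of the pairwise matrix, as an
# illustration with made-up names, not shogun's implementation.
import numpy as np

def minkowski_matrix(A, B, k=3.0):
    # A, B: one data point per column; entry (i, j) is d(A[:, i], B[:, j])
    return (np.abs(A[:, :, None] - B[:, None, :])**k).sum(axis=0)**(1.0 / k)

X = np.random.rand(3, 5)
print(minkowski_matrix(X, X))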
# The approach applied below, which shows how input data read from a file is
# processed, is a crucial ingredient for writing your own sample applications.
# This approach is just one example of what can be done using the distance
# functions provided by shogun.
#
# First, you need to determine what type your data will be, because this
# will determine the distance function you can use.
#
# This example loads two stored matrices of real values (feature type 'REAL')
# from different files and initializes the distance to 'TANIMOTO'.
# Each column of the matrices corresponds to one data point.
#
# The target 'TRAIN' for 'set_features' controls the processing of the given
# data points, where a pairwise distance (extended Jaccard coefficient)
# matrix is computed by 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix' and
# target 'TRAIN'.
#
# The target 'TEST' for 'set_features' controls the processing of the given
# data points 'TRAIN' and 'TEST', where a pairwise distance (extended
# Jaccard coefficient) matrix between these two data sets is computed by
# 'get_distance_matrix'.
#
# The resulting distance matrix can be reaccessed by 'get_distance_matrix'
# and target 'TEST'. The 'TRAIN' distance matrix ceases to exist.
#
# For more details see doc/classshogun_1_1CTanimotoDistance.html.
#
# Obviously, using the Tanimoto distance/coefficient is not limited to
# this showcase example.
def tanimoto_distance ():
print 'TanimotoDistance'
from sg import sg
sg('set_distance', 'TANIMOTO', 'REAL')
sg('set_features', 'TRAIN', fm_train_real)
dm=sg('get_distance_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
dm=sg('get_distance_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
tanimoto_distance()
# In this example the Histogram algorithm object computes a histogram over all
# 16-bit unsigned integers in the features.
def histogram ():
print 'Histogram'
order=3
gap=0
reverse='n' # the interface expects a char rather than a boolean; use 'r' to reverse
from sg import sg
# sg('new_distribution', 'HISTOGRAM')
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
# sg('train_distribution')
# histo=sg('get_histogram')
# num_examples=11
# num_param=sg('get_histogram_num_model_parameters')
# for i in xrange(num_examples):
# for j in xrange(num_param):
# sg('get_log_derivative %d %d' % (j, i))
# sg('get_log_likelihood')
# sg('get_log_likelihood_sample')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train=lm.load_dna('../data/fm_train_dna.dat')
fm_cube=lm.load_cubes('../data/fm_train_cube.dat')
histogram()
# In this example a hidden Markov model with 3 states and 6 observation symbols
# is trained on a string data set.
def hmm ():
print 'HMM'
N=3
M=6
order=1
hmms=list()
liks=list()
from sg import sg
sg('new_hmm', N, M)
sg('set_features', 'TRAIN', fm_cube, 'CUBE')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order)
sg('bw')
hmm=sg('get_hmm')
sg('new_hmm', N, M)
sg('set_hmm', hmm[0], hmm[1], hmm[2], hmm[3])
likelihood=sg('hmm_likelihood')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train=lm.load_dna('../data/fm_train_dna.dat')
fm_cube=lm.load_cubes('../data/fm_train_cube.dat')
hmm()
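# The likelihood queried via 'hmm_likelihood' above is conceptually the quantity
# computed by the classic forward algorithm. The sketch below evaluates it for a
# small model with N=3 states and M=6 observation symbols; it is a textbook
# illustration with made-up names and uniform dummy parameters, not shogun's
# implementation (which works in log-space for numerical stability).
import numpy as np

def forward_likelihood(p, A, B, obs):
    # p: initial state probabilities (N,), A: transition matrix (N, N),
    # B: emission probabilities (N, M), obs: sequence of symbol indices
    alpha = p * B[:, obs[0]]            # forward variables at t=0
    for o in obs[1:]:
        alpha = alpha.dot(A) * B[:, o]  # propagate one step, then emit
    return alpha.sum()                  # P(obs | model)

N, M = 3, 6
p = np.ones(N) / N
A = np.ones((N, N)) / N
B = np.ones((N, M)) / M
print(forward_likelihood(p, A, B, [0, 3, 5, 1]))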
# Trains an inhomogeneous Markov chain of order 3 on a DNA string data set. Due to
# the structure of the Markov chain it is very similar to an HMM with just one
# chain of connected hidden states - that is why we termed it the linear HMM.
def linear_hmm ():
print 'LinearHMM'
order=3
gap=0
reverse='n' # the interface expects a char rather than a boolean; use 'r' to reverse
from sg import sg
# sg('new_distribution', 'LinearHMM')
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
# sg('train_distribution')
# histo=sg('get_histogram')
# num_examples=11
# num_param=sg('get_histogram_num_model_parameters')
# for i in xrange(num_examples):
# for j in xrange(num_param):
# sg('get_log_derivative %d %d' % (j, i))
# sg('get_log_likelihood')
# sg('get_log_likelihood_sample')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train=lm.load_dna('../data/fm_train_dna.dat')
fm_cube=lm.load_cubes('../data/fm_train_cube.dat')
linear_hmm()
# This is an example for the initialization of the chi2-kernel on real data, where
# each column of the matrices corresponds to one training/test example.
def chi2 ():
print 'Chi2'
width=1.4
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_features', 'TEST', fm_test_real)
sg('set_kernel', 'CHI2', 'REAL', size_cache, width)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
chi2()
# This is an example for the initialization of a combined kernel, which is a weighted sum
# of, in this case, three kernels on real-valued data. The sub-kernel weights are all set to 1.
#
def combined ():
print 'Combined'
size_cache=10
weight=1.
from sg import sg
sg('clean_kernel')
sg('clean_features', 'TRAIN')
sg('clean_features', 'TEST')
sg('set_kernel', 'COMBINED', size_cache)
sg('add_kernel', weight, 'LINEAR', 'REAL', size_cache)
sg('add_features', 'TRAIN', fm_train_real)
sg('add_features', 'TEST', fm_test_real)
sg('add_kernel', weight, 'GAUSSIAN', 'REAL', size_cache, 1.)
sg('add_features', 'TRAIN', fm_train_real)
sg('add_features', 'TEST', fm_test_real)
sg('add_kernel', weight, 'POLY', 'REAL', size_cache, 3, False)
sg('add_features', 'TRAIN', fm_train_real)
sg('add_features', 'TEST', fm_test_real)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
combined()
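# The combined kernel above is, by construction, the weighted sum of its
# sub-kernel matrices. The NumPy lines below illustrate that structure with all
# weights set to 1, mirroring the example; they are a sketch with made-up names,
# and the exact normalization and scaling of shogun's sub-kernels may differ.
import numpy as np

X = np.random.rand(5, 20)                  # one example per column
K_lin = X.T.dot(X)                         # linear kernel
d2 = ((X[:, :, None] - X[:, None, :])**2).sum(axis=0)
K_gauss = np.exp(-d2)                      # Gaussian-type kernel, width 1
K_poly = X.T.dot(X)**3                     # polynomial kernel of degree 3
K_combined = 1.0*K_lin + 1.0*K_gauss + 1.0*K_poly  # weighted sum of sub-kernels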
# This is an example for the initialization of the CommUlongString kernel. This kernel
# sums over k-mer matches (k='order'). For efficient computation a preprocessor is used
# that extracts and sorts all k-mers. If 'use_sign' is set, each k-mer is counted
# only once.
def comm_ulong_string ():
print 'CommUlongString'
size_cache=10
order=3
gap=0
reverse='n' # the interface expects a char rather than a boolean; use 'r' to reverse
use_sign=False
normalization='FULL'
from sg import sg
sg('add_preproc', 'SORTULONGSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'ULONG', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'ULONG', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
sg('set_kernel', 'COMMSTRING', 'ULONG', size_cache, use_sign, normalization)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
comm_ulong_string()
# This is an example for the initialization of the CommWordString kernel (aka
# Spectrum or n-gram kernel; its name is derived from the unix command comm). This kernel
# sums over k-mer matches (k='order'). For efficient computation a preprocessor is used
# that extracts and sorts all k-mers. If 'use_sign' is set, each k-mer is counted
# only once.
def comm_word_string ():
print 'CommWordString'
size_cache=10
order=3
gap=0
reverse='n' # the interface expects a char rather than a boolean; use 'r' to reverse
use_sign=False
normalization='FULL'
from sg import sg
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
sg('set_kernel', 'COMMSTRING', 'WORD', size_cache, use_sign, normalization)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
comm_word_string()
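# Conceptually, the spectrum kernel value of two sequences is the inner product
# of their k-mer count vectors. The dictionary-based sketch below shows that
# computation for a single pair; it is an illustration with made-up names and
# ignores the 'FULL' normalization applied by shogun.
from collections import Counter

def spectrum_kernel(s, t, order=3, use_sign=False):
    cs = Counter(s[i:i + order] for i in range(len(s) - order + 1))
    ct = Counter(t[i:i + order] for i in range(len(t) - order + 1))
    if use_sign:  # count every k-mer at most once
        cs = Counter(dict.fromkeys(cs, 1))
        ct = Counter(dict.fromkeys(ct, 1))
    return sum(cs[kmer] * ct[kmer] for kmer in cs)

print(spectrum_kernel('ACGTACGT', 'ACGTTTTT'))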
# The constant kernel gives a trivial kernel matrix with all entries set to the same value
# defined by the argument 'c'.
#
def const ():
print 'Const'
c=23.
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_features', 'TEST', fm_test_real)
sg('set_kernel', 'CONST', 'REAL', size_cache, c)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
const()
# This is an example for the initialization of the diag-kernel.
# The diag kernel sets all kernel matrix entries except those on
# the main diagonal to zero.
def diag ():
print 'Diag'
diag=23.
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_features', 'TEST', fm_test_real)
sg('set_kernel', 'DIAG', 'REAL', size_cache, diag)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
diag()
# The FixedDegree String kernel takes as input two strings of the same size and counts the number of matches of length d.
def fixed_degree_string ():
print 'FixedDegreeString'
size_cache=10
degree=3
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('set_kernel', 'FIXEDDEGREE', 'CHAR', size_cache, degree)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
fixed_degree_string()
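# The counting rule stated above is easy to spell out: at every position, check
# whether both strings carry the same substring of length 'degree'. The sketch
# below is an illustration with made-up names, not shogun's implementation.
def fixed_degree_kernel(s, t, degree=3):
    assert len(s) == len(t)  # the kernel is defined on strings of equal length
    return sum(s[i:i + degree] == t[i:i + degree]
               for i in range(len(s) - degree + 1))

print(fixed_degree_kernel('ACGTACGT', 'ACGTTGCA'))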
# The well-known Gaussian kernel (the Swiss army knife of SVMs) on dense real-valued features.
def gaussian ():
print 'Gaussian'
width=1.9
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_features', 'TEST', fm_test_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
gaussian()
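# For reference, a NumPy sketch of a Gaussian kernel matrix with one example per
# column. It is an illustration with made-up names; it uses the common
# convention k(x,y) = exp(-||x - y||^2 / width), so double-check the width
# convention against the shogun documentation before comparing numbers.
import numpy as np

def gaussian_kernel_matrix(A, B, width=1.9):
    d2 = ((A[:, :, None] - B[:, None, :])**2).sum(axis=0)
    return np.exp(-d2 / width)

X = np.random.rand(3, 5)
print(gaussian_kernel_matrix(X, X))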
# An experimental kernel inspired by the WeightedDegreePositionStringKernel and the Gaussian kernel.
# The idea is to shift the dimensions of the input vectors against each other. 'shift_step' is the
# step size of the shifts and 'max_shift' is the maximal shift.
def gaussian_shift ():
print 'GaussianShift'
width=1.9
max_shift=2
shift_step=1
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_features', 'TEST', fm_test_real)
sg('set_kernel', 'GAUSSIANSHIFT', 'REAL', size_cache, width, max_shift, shift_step)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
gaussian_shift()
# This is an example for the initialization of a linear kernel on real-valued
# data using scaling factor 1.2.
def linear ():
print 'Linear'
scale=1.2
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_features', 'TEST', fm_test_real)
sg('set_kernel', 'LINEAR', 'REAL', size_cache, scale)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
linear()
# This is an example for the initialization of a linear kernel on raw byte
# data.
def linear_byte ():
print 'LinearByte'
from sg import sg
sg('set_features', 'TRAIN', fm_train_byte, 'RAWBYTE')
sg('set_features', 'TEST', fm_test_byte, 'RAWBYTE')
sg('set_kernel', 'LINEAR', 'BYTE', 10)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from numpy import ubyte
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_byte=ubyte(lm.load_numbers('../data/fm_train_byte.dat'))
fm_test_byte=ubyte(lm.load_numbers('../data/fm_test_byte.dat'))
linear_byte()
# This is an example for the initialization of a linear kernel on string data. The
# strings are all of the same length and consist of the characters 'ACGT' corresponding
# to the DNA-alphabet. Each column of the matrices of type char corresponds to
# one training/test example.
def linear_string ():
print 'LinearString'
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('set_kernel', 'LINEAR', 'CHAR', size_cache)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
linear_string()
# This is an example for the initialization of a linear kernel on word (2byte)
# data.
def linear_word ():
print 'LinearWord'
size_cache=10
scale=1.4
from sg import sg
sg('set_features', 'TRAIN', fm_train_word)
sg('set_features', 'TEST', fm_test_word)
sg('set_kernel', 'LINEAR', 'WORD', size_cache, scale)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
from numpy import ushort
lm=LoadMatrix()
fm_train_word=ushort(lm.load_numbers('../data/fm_train_word.dat'))
fm_test_word=ushort(lm.load_numbers('../data/fm_test_word.dat'))
linear_word()
# This is an example for the initialization of the local alignment kernel on
# DNA sequences, where each column of the matrices of type char corresponds to
# one training/test example.
def local_alignment_string():
print 'LocalAlignmentString'
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('set_kernel', 'LOCALALIGNMENT', 'CHAR', size_cache)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
local_alignment_string()
# This example initializes the locality improved string kernel. The locality improved string
# kernel is defined on sequences of the same length and inspects letters matching at
# corresponding positions in both sequences. The kernel sums over all matches in windows of
# length l and takes this sum to the power of 'inner_degree'. The sum over all these
# terms along the sequence is taken to the power of 'outer_degree'.
def locality_improved_string ():
print 'LocalityImprovedString'
size_cache=10
length=5
inner_degree=5
outer_degree=inner_degree+2
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('set_kernel', 'LIK', 'CHAR', size_cache, length, inner_degree, outer_degree)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
label_train_dna=lm.load_labels('../data/label_train_dna.dat')
locality_improved_string()
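# Following the description above, the kernel can be sketched directly (an
# unoptimized reference, not Shogun's implementation):
def lik_kernel(x, y, l, inner_degree, outer_degree):
    # windowed match counts are raised to inner_degree, summed along the
    # sequence, and the resulting sum is raised to outer_degree
    matches = [1.0 if a == b else 0.0 for a, b in zip(x, y)]
    total = 0.0
    for i in range(len(matches) - l + 1):
        total += sum(matches[i:i+l]) ** inner_degree
    return total ** outer_degree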
# This is an example initializing the oligo string kernel which takes distances
# between matching oligos (k-mers) into account via a Gaussian. Variable 'k' defines the length
# of the oligo and variable 'w' the width of the gaussian. The oligo string kernel is
# implemented for the DNA-alphabet 'ACGT'.
#
def oligo_string ():
print 'OligoString'
size_cache=10
k=3
width=1.2
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('set_kernel', 'OLIGO', 'CHAR', size_cache, k, width)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
oligo_string()
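# The idea can be sketched directly: every pair of matching k-mers contributes
# a Gaussian of the distance between their positions (normalization constants
# are omitted here; the exact form is in the OligoStringKernel documentation):
from math import exp
def oligo_kernel_sketch(x, y, k, w):
    total = 0.0
    for i in range(len(x) - k + 1):
        for j in range(len(y) - k + 1):
            if x[i:i+k] == y[j:j+k]:
                total += exp(-(i - j) ** 2 / (4.0 * w))
    return total
# In this example a plugin estimator is trained on DNA sequences that have
# been converted to word features, and the HistogramWord kernel is computed
# on top of the estimated histogram probabilities.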
def plugin_estimate_histogram ():
print 'PluginEstimate w/ HistogramWord'
size_cache=10
order=3
gap=0
reverse='n' # bit silly to not use boolean, set 'r' to yield true
use_sign=False
normalization='FULL'
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
pseudo_pos=1e-1
pseudo_neg=1e-1
sg('new_plugin_estimator', pseudo_pos, pseudo_neg)
sg('set_labels', 'TRAIN', label_train_dna)
sg('train_estimator')
sg('set_kernel', 'HISTOGRAM', 'WORD', size_cache)
km=sg('get_kernel_matrix', 'TRAIN')
# not supported yet
# lab=sg('plugin_estimate_classify')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
label_train_dna=lm.load_labels('../data/label_train_dna.dat')
plugin_estimate_histogram()
# This example initializes the polynomial kernel with real data.
# If the variable 'inhomogene' is 'true', +1 is added to the scalar product
# before taking it to the power of 'degree'. If 'use_normalization' is
# set to 'true', the kernel matrix will be normalized by the square roots
# of the diagonal entries.
def poly ():
print 'Poly'
degree=4
inhomogene=False
use_normalization=True
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_features', 'TEST', fm_test_real)
sg('set_kernel', 'POLY', 'REAL', size_cache, degree, inhomogene, use_normalization)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
poly()
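# Per the description above, a minimal sketch of the polynomial kernel and of
# the diagonal normalization applied when 'use_normalization' is set:
from numpy import dot, sqrt
def poly_kernel(x, y, degree, inhomogene):
    c = 1.0 if inhomogene else 0.0
    return (dot(x, y) + c) ** degree
def normalize_diag(km):
    # divide K_ij by sqrt(K_ii * K_jj)
    d = sqrt(km.diagonal())
    return km / d[:, None] / d[None, :]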
# This is an example for the initialization of the PolyMatchString kernel on string data.
# The PolyMatchString kernel sums over the matches of two strings of the same length and
# takes the sum to the power of 'degree'. The strings consist of the characters 'ACGT' corresponding
# to the DNA-alphabet. Each column of the matrices of type char corresponds to
# one training/test example.
def poly_match_string ():
print 'PolyMatchString'
size_cache=10
degree=3
inhomogene=False
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('set_kernel', 'POLYMATCH', 'CHAR', size_cache, degree, inhomogene)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
poly_match_string()
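# A direct sketch of the PolyMatchString kernel per the description above:
def poly_match_kernel(x, y, degree, inhomogene):
    # count positions where the equal-length strings agree, optionally
    # add 1, then raise the sum to the power of 'degree'
    m = sum(1 for a, b in zip(x, y) if a == b)
    return (m + 1 if inhomogene else m) ** degree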
# The PolyMatchWordString kernel is defined on strings of equal length.
# The kernel sums over the matches of two strings of the same length and
# takes the sum to the power of 'degree'. The strings in this example
# consist of the characters 'ACGT' corresponding to the DNA-alphabet. Each
# column of the matrices of type char corresponds to one training/test example.
def poly_match_word ():
print 'PolyMatchWord'
size_cache=10
degree=2
inhomogene=True
normalize=True
order=3
gap=0
reverse='n' # bit silly to not use boolean, set 'r' to yield true
from sg import sg
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
sg('set_kernel', 'POLYMATCH', 'WORD', size_cache, degree, inhomogene, normalize)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
label_train_dna=lm.load_labels('../data/label_train_dna.dat')
poly_match_word()
# The SalzbergWordString kernel implements the Salzberg kernel.
#
# It is described in
#
# Engineering Support Vector Machine Kernels That Recognize Translation Initiation Sites
# A. Zien, G. Raetsch, S. Mika, B. Schoelkopf, T. Lengauer, K.-R. Mueller
#
def plugin_estimate_salzberg ():
print 'PluginEstimate w/ SalzbergWord'
size_cache=10
order=3
gap=0
reverse='n' # bit silly to not use boolean, set 'r' to yield true
use_sign=False
normalization='FULL'
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
pseudo_pos=1e-1
pseudo_neg=1e-1
sg('new_plugin_estimator', pseudo_pos, pseudo_neg)
sg('set_labels', 'TRAIN', label_train_dna)
sg('train_estimator')
sg('set_kernel', 'SALZBERG', 'WORD', size_cache)
#sg('set_prior_probs', 0.4, 0.6)
sg('set_prior_probs_from_labels', label_train_dna)
km=sg('get_kernel_matrix', 'TRAIN')
# not supported yet
# lab=sg('plugin_estimate_classify')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
label_train_dna=lm.load_labels('../data/label_train_dna.dat')
plugin_estimate_salzberg()
# The standard Sigmoid kernel computed on dense real valued features.
def sigmoid ():
print 'Sigmoid'
num_feats=11
gamma=1.2
coef0=1.3
size_cache=10
from sg import sg
sg('set_features', 'TRAIN', fm_train_real)
sg('set_features', 'TEST', fm_test_real)
sg('set_kernel', 'SIGMOID', 'REAL', size_cache, gamma, coef0)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
sigmoid()
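# The standard sigmoid kernel is the hyperbolic tangent of a scaled, shifted
# dot product, k(x,y)=tanh(gamma*x'y+coef0); sketch:
from numpy import dot, tanh
def sigmoid_kernel(x, y, gamma, coef0):
    return tanh(gamma * dot(x, y) + coef0)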
# The SimpleLocalityImprovedString kernel is a 'simplified' and better performing version of the locality improved kernel.
def simple_locality_improved_string ():
print 'SimpleLocalityImprovedString'
size_cache=10
length=5
inner_degree=5
outer_degree=inner_degree+2
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('set_kernel', 'SLIK', 'CHAR', size_cache, length, inner_degree, outer_degree)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
label_train_dna=lm.load_labels('../data/label_train_dna.dat')
simple_locality_improved_string()
# The WeightedCommWordString kernel may be used to compute the weighted
# spectrum kernel (i.e. a spectrum kernel for 1 to K-mers, where each k-mer
# length is weighted by some coefficient beta_k) from strings that have
# been mapped into unsigned 16bit integers.
#
# These 16bit integers correspond to k-mers. To be applicable in this kernel
# they need to be sorted (e.g. via the SortWordString pre-processor).
#
# It basically uses the algorithm in the unix "comm" command (hence the name)
# to compute:
#
# k({\bf x},{\bf x'}) = \sum_{k=1}^K \beta_k \Phi_k({\bf x}) \cdot \Phi_k({\bf x'})
#
# where \Phi_k maps a sequence {\bf x} that consists of letters in
# \Sigma to a feature vector of size |\Sigma|^k. In this feature
# vector each entry denotes how often the k-mer appears in that {\bf x}.
#
# Note that this representation is especially tuned to small alphabets
# (like the 2-bit alphabet DNA), for which it enables spectrum kernels
# of order 8.
#
# For this kernel the linadd speedups are quite efficiently implemented using
# direct maps.
#
def weighted_comm_word_string ():
print 'WeightedCommWordString'
size_cache=10
order=3
gap=0
reverse='n' # bit silly to not use boolean, set 'r' to yield true
use_sign=False
normalization='FULL'
from sg import sg
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
sg('set_kernel', 'WEIGHTEDCOMMSTRING', 'WORD', size_cache, use_sign, normalization)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
label_train_dna=lm.load_labels('../data/label_train_dna.dat')
weighted_comm_word_string()
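# The formula above reduces to a weighted sum of dot products between k-mer
# count vectors; a direct (unoptimized) sketch for small alphabets and orders:
def kmer_counts(s, k):
    counts = {}
    for i in range(len(s) - k + 1):
        u = s[i:i+k]
        counts[u] = counts.get(u, 0) + 1
    return counts
def weighted_spectrum_kernel(x, y, K, beta):
    # beta[k-1] is the weight of the order-k contribution
    total = 0.0
    for k in range(1, K + 1):
        cx, cy = kmer_counts(x, k), kmer_counts(y, k)
        total += beta[k - 1] * sum(cx[u] * cy.get(u, 0) for u in cx)
    return total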
# The Weighted Degree Position String kernel (Weighted Degree kernel with shifts).
#
# The WD-shift kernel of order d compares two sequences X and
# Y of length L by summing all contributions of k-mer matches of
# lengths k in 1...d, weighted by coefficients beta_k, while
# allowing for a positional tolerance of up to s positions.
#
def weighted_degree_position_string ():
print 'WeightedDegreePositionString'
size_cache=10
degree=20
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('set_kernel', 'WEIGHTEDDEGREEPOS', 'CHAR', size_cache, degree)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
weighted_degree_position_string()
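# An explicit (slow) sketch of the WD-shift idea; the 1/(2s) decay used for
# shifted matches is an assumption for illustration, Shogun's actual weighting
# may differ:
def wd_shift_kernel(x, y, d, shift, beta):
    assert len(x) == len(y)
    total = 0.0
    for k in range(1, d + 1):
        for l in range(len(x) - k + 1):
            if x[l:l+k] == y[l:l+k]:
                total += beta[k - 1]
            for s in range(1, shift + 1):
                if l + s + k > len(x):
                    break
                w = beta[k - 1] / (2.0 * s)  # assumed decay with shift s
                if x[l+s:l+s+k] == y[l:l+k]:
                    total += w
                if x[l:l+k] == y[l+s:l+s+k]:
                    total += w
    return total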
# The Weighted Degree String kernel.
#
# The WD kernel of order d compares two sequences X and
# Y of length L by summing all contributions of k-mer matches of
# lengths k in 1...d, weighted by coefficients beta_k. It
# is defined as
#
# k(X, Y)=\sum_{k=1}^d\beta_k\sum_{l=1}^{L-k+1}I(u_{k,l}(X)=u_{k,l}(Y)).
#
# Here, $u_{k,l}(X)$ is the string of length k starting at position
# l of the sequence X and I(.) is the indicator function
# which evaluates to 1 when its argument is true and to 0
# otherwise.
#
def weighted_degree_string ():
print 'WeightedDegreeString'
size_cache=10
degree=20
from sg import sg
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('set_kernel', 'WEIGHTEDDEGREE', 'CHAR', size_cache, degree)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
weighted_degree_string()
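# The formula above translates directly into a (slow but explicit) reference
# implementation:
def wd_kernel(x, y, d, beta):
    # sum over orders k and positions l of beta_k * I(u_{k,l}(X) = u_{k,l}(Y))
    assert len(x) == len(y)
    total = 0.0
    for k in range(1, d + 1):
        for l in range(len(x) - k + 1):
            if x[l:l+k] == y[l:l+k]:
                total += beta[k - 1]
    return total
# In this example a combined kernel consisting of a linear, a Gaussian and a
# polynomial kernel is constructed, and a multi-class multiple kernel learning
# (MKL) classifier is trained on a toy data set; MKL learns the weighting of
# the three sub-kernels.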
def mkl_multiclass ():
print 'mkl_multiclass'
size_cache=10
width=1.2
C=1.2
epsilon=1e-5
mkl_eps=0.001
mkl_norm=1.5
weight=1.0
from sg import sg
sg('clean_kernel')
sg('clean_features', 'TRAIN')
sg('clean_features', 'TEST')
sg('set_kernel', 'COMBINED', size_cache)
sg('add_kernel', weight, 'LINEAR', 'REAL', size_cache)
sg('add_features', 'TRAIN', fm_train_real)
sg('add_features', 'TEST', fm_test_real)
sg('add_kernel', weight, 'GAUSSIAN', 'REAL', size_cache, width)
sg('add_features', 'TRAIN', fm_train_real)
sg('add_features', 'TEST', fm_test_real)
sg('add_kernel', weight, 'POLY', 'REAL', size_cache, 2)
sg('add_features', 'TRAIN', fm_train_real)
sg('add_features', 'TEST', fm_test_real)
sg('set_labels', 'TRAIN', label_train_multiclass)
sg('new_classifier', 'MKL_MULTICLASS')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('mkl_parameters', mkl_eps, 0.0, mkl_norm)
sg('train_classifier')
result=sg('classify')
print result
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
label_train_multiclass=lm.load_labels('../data/label_train_multiclass.dat')
mkl_multiclass()
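# In this example MKL is used for regression: three Gaussian kernels of
# different widths are combined, and an MKL regressor is trained on randomly
# generated toy data.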
from sg import sg
from numpy import *
num=100
weight=1.
labels=concatenate((-ones([1,num]), ones([1,num])),1)[0]
features=concatenate((random.normal(size=(2,num))-1,random.normal(size=(2,num))+1),1)
tube_epsilon=1e-2
sg('new_classifier', 'MKL_REGRESSION')
sg('c', 1.)
sg('svr_tube_epsilon', tube_epsilon)
sg('set_labels', 'TRAIN', labels)
sg('add_features', 'TRAIN', features)
sg('add_features', 'TRAIN', features)
sg('add_features', 'TRAIN', features)
sg('set_kernel', 'COMBINED', 100)
sg('add_kernel', weight, 'GAUSSIAN', 'REAL', 100, 100.)
sg('add_kernel', weight, 'GAUSSIAN', 'REAL', 100, 10.)
sg('add_kernel', weight, 'GAUSSIAN', 'REAL', 100, 1.)
sg('train_classifier')
[bias, alphas]=sg('get_svm')
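# In this example MKL is used for two-class classification on randomly
# generated toy data, again combining three Gaussian kernels of different
# widths.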
from sg import sg
from numpy import *
num=100
weight=1.
labels=concatenate((-ones([1,num]), ones([1,num])),1)[0]
features=concatenate((random.normal(size=(2,num))-1,random.normal(size=(2,num))+1),1)
sg('c', 10.)
sg('new_classifier', 'MKL_CLASSIFICATION')
sg('set_labels', 'TRAIN', labels)
sg('add_features', 'TRAIN', features)
sg('add_features', 'TRAIN', features)
sg('add_features', 'TRAIN', features)
sg('set_kernel', 'COMBINED', 100)
sg('add_kernel', weight, 'GAUSSIAN', 'REAL', 100, 100.)
sg('add_kernel', weight, 'GAUSSIAN', 'REAL', 100, 10.)
sg('add_kernel', weight, 'GAUSSIAN', 'REAL', 100, 1.)
sg('train_classifier')
[bias, alphas]=sg('get_svm')
# In this example a kernel matrix is computed for a given real-valued data set.
# The kernel used is the Chi2 kernel which operates on real-valued vectors. It
# computes the chi-squared distance between sets of histograms. It is a very
# useful distance in image recognition (used to detect objects). The preprocessor
# LogPlusOne adds one to a dense real-valued vector and takes the logarithm of
# each component of it. It is most useful in situations where the inputs are
# counts: when comparing small counts, any difference may matter a lot, while
# a small difference between large counts hardly matters. This is what the log
# transformation controls for.
def log_plus_one ():
print 'LogPlusOne'
width=1.4
size_cache=10
from sg import sg
sg('add_preproc', 'LOGPLUSONE')
sg('set_kernel', 'CHI2', 'REAL', size_cache, width)
sg('set_features', 'TRAIN', fm_train_real)
sg('attach_preproc', 'TRAIN')
km=sg('get_kernel_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
sg('attach_preproc', 'TEST')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
log_plus_one()
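# Both steps are easy to state directly: LogPlusOne maps each component x to
# log(1+x), and the Chi2 kernel (in the parameterization assumed here) is
# k(x,y)=exp(-(1/width)*sum_i (x_i-y_i)^2/(x_i+y_i)). Sketch on NumPy arrays:
from numpy import exp, log
def log_plus_one_preproc(x):
    # componentwise log(1 + x), as the LOGPLUSONE preprocessor does
    return log(1.0 + x)
def chi2_kernel(x, y, width):
    num = (x - y) ** 2
    den = x + y
    mask = den != 0  # skip components where both entries are zero
    return exp(-(num[mask] / den[mask]).sum() / width)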
# In this example a kernel matrix is computed for a given real-valued data set.
# The kernel used is the Chi2 kernel which operates on real-valued vectors. It
# computes the chi-squared distance between sets of histograms. It is a very
# useful distance in image recognition (used to detect objects). The preprocessor
# NormOne normalizes each vector to have norm 1.
def norm_one ():
print 'NormOne'
width=1.4
size_cache=10
from sg import sg
sg('add_preproc', 'NORMONE')
sg('set_kernel', 'CHI2', 'REAL', size_cache, width)
sg('set_features', 'TRAIN', fm_train_real)
sg('attach_preproc', 'TRAIN')
km=sg('get_kernel_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
sg('attach_preproc', 'TEST')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
norm_one()
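# The preprocessor itself is a one-liner on a NumPy vector:
from numpy.linalg import norm
def norm_one_preproc(vec):
    # scale a vector to unit Euclidean norm
    return vec / norm(vec)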
# In this example a kernel matrix is computed for a given real-valued data set.
# The kernel used is the Chi2 kernel which operates on real-valued vectors. It
# computes the chi-squared distance between sets of histograms. It is a very
# useful distance in image recognition (used to detect objects). The preprocessor
# PruneVarSubMean subtracts the mean from each feature and removes features that
# have zero variance.
def prune_var_sub_mean ():
print 'PruneVarSubMean'
width=1.4
size_cache=10
divide_by_std=True
from sg import sg
sg('add_preproc', 'PRUNEVARSUBMEAN', divide_by_std)
sg('set_kernel', 'CHI2', 'REAL', size_cache, width)
sg('set_features', 'TRAIN', fm_train_real)
sg('attach_preproc', 'TRAIN')
km=sg('get_kernel_matrix', 'TRAIN')
sg('set_features', 'TEST', fm_test_real)
sg('attach_preproc', 'TEST')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_real=lm.load_numbers('../data/fm_train_real.dat')
fm_test_real=lm.load_numbers('../data/fm_test_real.dat')
prune_var_sub_mean()
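# A NumPy sketch of the preprocessor as described above (features are rows and
# examples are columns, matching the data layout of these examples; whether
# the standard deviation is divided out is controlled by divide_by_std):
def prune_var_sub_mean_preproc(X, divide_by_std=True):
    mu = X.mean(axis=1)[:, None]
    var = X.var(axis=1)
    keep = var > 0                     # drop zero-variance features
    Xc = (X - mu)[keep, :]
    if divide_by_std:
        Xc = Xc / (var[keep] ** 0.5)[:, None]
    return Xc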
# In this example a kernel matrix is computed for a given string data set. The
# CommUlongString kernel is used to compute the spectrum kernel from strings that
# have been mapped into unsigned 64bit integers. These 64bit integers correspond
# to k-mers. To be applicable in this kernel the mapped k-mers have to be sorted.
# This is done using the SortUlongString preprocessor, which sorts the individual
# strings in ascending order. The kernel function basically uses the algorithm in
# the unix "comm" command (hence the name). Note that this representation enables
# spectrum kernels of order 8 for 8bit alphabets (like binaries) and order 32 for
# 2-bit alphabets like DNA. For this kernel the linadd speedups are implemented
# (though there is room for improvement here when a whole set of sequences is
# ADDed) using sorted lists.
def sort_ulong_string ():
print 'CommUlongString'
size_cache=10
order=3
gap=0
reverse='n' # bit silly to not use boolean, set 'r' to yield true
use_sign=False
normalization='FULL'
from sg import sg
sg('add_preproc', 'SORTULONGSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'ULONG', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'ULONG', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
sg('set_kernel', 'COMMSTRING', 'ULONG', size_cache, use_sign, normalization)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
sort_ulong_string()
# In this example a kernel matrix is computed for a given string data set. The
# CommWordString kernel is used to compute the spectrum kernel from strings that
# have been mapped into unsigned 16bit integers. These 16bit integers correspond
# to k-mers. To be applicable in this kernel the mapped k-mers have to be sorted.
# This is done using the SortWordString preprocessor, which sorts the individual
# strings in ascending order. The kernel function basically uses the algorithm in
# the unix "comm" command (hence the name). Note that this representation is
# especially tuned to small alphabets (like the 2-bit alphabet DNA), for which it
# enables spectrum kernels of order up to 8. For this kernel the linadd speedups
# are quite efficiently implemented using direct maps.
def sort_word_string ():
print 'CommWordString'
size_cache=10
order=3
gap=0
reverse='n' # bit silly to not use boolean, set 'r' to yield true
use_sign=False
normalization='FULL'
from sg import sg
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
sg('set_kernel', 'COMMSTRING', 'WORD', size_cache, use_sign, normalization)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train_dna=lm.load_dna('../data/fm_train_dna.dat')
fm_test_dna=lm.load_dna('../data/fm_test_dna.dat')
sort_word_string()
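# The "comm"-style merge over sorted k-mer lists, as used by both the
# CommUlongString and CommWordString examples above, can be sketched as a
# merge of two sorted sequences; matching runs contribute the product of
# their multiplicities (or 1 if use_sign is enabled):
def comm_kernel(a, b, use_sign=False):
    # a, b: sorted sequences of k-mer codes (e.g. 16bit or 64bit integers)
    i, j, total = 0, 0, 0.0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            i += 1
        elif a[i] > b[j]:
            j += 1
        else:
            v, ca, cb = a[i], 0, 0
            while i < len(a) and a[i] == v:
                i += 1
                ca += 1
            while j < len(b) and b[j] == v:
                j += 1
                cb += 1
            total += 1.0 if use_sign else float(ca * cb)
    return total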
# In this example a kernelized version of ridge regression (KRR) is trained on a
# real-valued data set. The KRR is trained with regularization parameter tau=1e-6
# and a Gaussian kernel with width=2.1.
def krr ():
print 'KRR'
size_cache=10
width=2.1
C=1.2
tau=1e-6
from sg import sg
sg('set_features', 'TRAIN', fm_train)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train)
sg('new_regression', 'KRR')
sg('krr_tau', tau)
sg('c', C)
sg('train_regression')
sg('set_features', 'TEST', fm_test)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train=lm.load_numbers('../data/fm_train_real.dat')
fm_test=lm.load_numbers('../data/fm_test_real.dat')
label_train=lm.load_labels('../data/label_train_twoclass.dat')
krr()
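# KRR has the closed-form solution alpha = (K + tau*I)^{-1} y, which is what
# 'train_regression' computes internally; a NumPy sketch of training and
# prediction:
from numpy import dot, eye
from numpy.linalg import solve
def krr_train(K, y, tau):
    # K: kernel matrix on the training data, y: training targets
    return solve(K + tau * eye(K.shape[0]), y)
def krr_predict(K_test, alpha):
    # K_test: kernel matrix between test (rows) and train (columns) examples
    return dot(K_test, alpha)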
# In this example a support vector regression algorithm is trained on a
# real-valued toy data set. The underlying library used for the SVR training is
# LIBSVM. The SVR is trained with regularization parameter C=1.2 and a Gaussian
# kernel with width=2.1.
#
# For more details on the LIBSVM solver see http://www.csie.ntu.edu.tw/~cjlin/libsvm/ .
def libsvr ():
print 'LibSVR'
size_cache=10
width=2.1
C=1.2
epsilon=1e-5
tube_epsilon=1e-2
from sg import sg
sg('set_features', 'TRAIN', fm_train)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train)
sg('new_regression', 'LIBSVR')
sg('svr_tube_epsilon', tube_epsilon)
sg('c', C)
sg('train_regression')
sg('set_features', 'TEST', fm_test)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train=lm.load_numbers('../data/fm_train_real.dat')
fm_test=lm.load_numbers('../data/fm_test_real.dat')
label_train=lm.load_labels('../data/label_train_twoclass.dat')
libsvr()
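# Both SVR examples use the epsilon-insensitive 'tube' loss selected via
# 'svr_tube_epsilon': deviations within the tube are not penalized, and
# deviations outside it grow linearly. For intuition:
def eps_insensitive_loss(y_true, y_pred, eps):
    return max(0.0, abs(y_true - y_pred) - eps)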
# In this example a support vector regression algorithm is trained on a
# real-valued toy data set. The underlying library used for the SVR training is
# SVM^light. The SVR is trained with regularization parameter C=1.2 and a Gaussian
# kernel with width=2.1.
#
# For more details on SVM^light see
# T. Joachims. Making large-scale SVM learning practical. In Advances in Kernel
# Methods -- Support Vector Learning, pages 169-184. MIT Press, Cambridge, MA USA, 1999.
def svr_light ():
print 'SVRLight'
size_cache=10
width=2.1
C=1.2
epsilon=1e-5
tube_epsilon=1e-2
from sg import sg
sg('set_features', 'TRAIN', fm_train)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train)
try:
sg('new_regression', 'SVRLIGHT')
except RuntimeError:
return
sg('svr_tube_epsilon', tube_epsilon)
sg('c', C)
sg('train_regression')
sg('set_features', 'TEST', fm_test)
result=sg('classify')
if __name__=='__main__':
from tools.load import LoadMatrix
lm=LoadMatrix()
fm_train=lm.load_numbers('../data/fm_train_real.dat')
fm_test=lm.load_numbers('../data/fm_test_real.dat')
label_train=lm.load_labels('../data/label_train_twoclass.dat')
svr_light()