diff --git a/plot_elm_comparision.py b/plot_elm_comparision.py
new file mode 100644
index 0000000..a066a7a
--- /dev/null
+++ b/plot_elm_comparision.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+==========================
+ELM Classifiers Comparison
+==========================
+A comparison of several ELMClassifiers with different types of hidden
+layer activations.
+ELMClassifier is a classifier based on the Extreme Learning Machine,
+a single layer feedforward network with random hidden layer components
+and least squares fitting of the hidden->output weights by default [1][2].
+The point of this example is to illustrate the nature of decision boundaries
+with different hidden layer activation types and regressors.
+This should be taken with a grain of salt, as the intuition conveyed by
+these examples does not necessarily carry over to real datasets.
+In particular in high dimensional spaces data can more easily be separated
+linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
+might lead to better generalization.
+The plots show training points in solid colors and testing points
+semi-transparent. The lower right shows the classification accuracy on the test
+set.
+References
+----------
+.. [1] http://www.extreme-learning-machines.org
+.. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
+       Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
+       2006.
+===============================================================================
+Basis Functions:
+  gaussian rbf : exp(-gamma * (||x-c||/r)^2)
+  tanh         : np.tanh(a)
+  sinsq        : np.power(np.sin(a), 2.0)
+  tribas       : np.clip(1.0 - np.fabs(a), 0.0, 1.0)
+  hardlim      : np.array(a > 0.0, dtype=float)
+  where x      : input pattern
+        a      : dot_product(x, c) + b
+        c,r    : randomly generated components
+Label Legend:
+  ELM(10,tanh)     : 10 tanh units
+  ELM(10,tanh,LR)  : 10 tanh units, LogisticRegression
+  ELM(10,sinsq)    : 10 sin*sin units
+  ELM(10,tribas)   : 10 tribas units
+  ELM(10,hardlim)  : 10 hardlim units
+  ELM(20,rbf(0.1)) : 20 rbf units, gamma=0.1
+"""
+print(__doc__)
+
+
+# Code source: Gael Varoquaux
+#              Andreas Mueller
+# Modified for Documentation merge by Jaques Grobler
+# Modified for Extreme Learning Machine Classifiers by David Lambert
+# License: BSD
+
+import numpy as np
+import pylab as pl
+
+from matplotlib.colors import ListedColormap
+from sklearn.datasets import make_moons, make_circles, make_classification
+from sklearn.preprocessing import StandardScaler
+from sklearn.model_selection import train_test_split
+from sklearn.linear_model import LogisticRegression
+
+from elm import GenELMClassifier
+from random_layer import RBFRandomLayer, MLPRandomLayer
+
+
+def get_data_bounds(X):
+    h = .02  # step size in the mesh
+
+    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
+    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
+
+    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
+                         np.arange(y_min, y_max, h))
+
+    return (x_min, x_max, y_min, y_max, xx, yy)
+
+
+def plot_data(ax, X_train, y_train, X_test, y_test, xx, yy):
+    cm = ListedColormap(['#FF0000', '#0000FF'])
+    # Plot the training points
+    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm)
+    # and testing points
+    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm, alpha=0.6)
+    ax.set_xlim(xx.min(), xx.max())
+    ax.set_ylim(yy.min(), yy.max())
+    ax.set_xticks(())
+    ax.set_yticks(())
+
+
+def plot_contour(ax, X_train, y_train, X_test, y_test, xx, yy, Z, name, score):
+    cm = pl.cm.RdBu
+    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
+
+    ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
+
+    # Plot also the training points
+    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
+    # and testing points
+    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
+
+    ax.set_xlim(xx.min(), xx.max())
+    ax.set_ylim(yy.min(), yy.max())
+    ax.set_xticks(())
+    ax.set_yticks(())
+
+    ax.set_title(name)
+    ax.text(xx.max() - 0.3, yy.min() + 0.3, ('%.2f' % score).lstrip('0'),
+            size=13, horizontalalignment='right')
+
+
+def make_datasets():
+    return [make_moons(n_samples=200, noise=0.3, random_state=0),
+            make_circles(n_samples=200, noise=0.2, factor=0.5, random_state=1),
+            make_linearly_separable()]
+
+
+def make_classifiers():
+
+    names = ["ELM(10,tanh)", "ELM(10,tanh,LR)", "ELM(10,sinsq)",
+             "ELM(10,tribas)", "ELM(10,hardlim)", "ELM(20,rbf(0.1))"]
+
+    nh = 10
+
+    # pass user defined transfer func
+    sinsq = (lambda x: np.power(np.sin(x), 2.0))
+    srhl_sinsq = MLPRandomLayer(n_hidden=nh, activation_func=sinsq)
+
+    # use internal transfer funcs
+    srhl_tanh = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
+
+    srhl_tribas = MLPRandomLayer(n_hidden=nh, activation_func='tribas')
+
+    srhl_hardlim = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')
+
+    # use gaussian RBF
+    srhl_rbf = RBFRandomLayer(n_hidden=nh*2, rbf_width=0.1, random_state=0)
+
+    log_reg = LogisticRegression()
+
+    classifiers = [GenELMClassifier(hidden_layer=srhl_tanh),
+                   GenELMClassifier(hidden_layer=srhl_tanh, regressor=log_reg),
+                   GenELMClassifier(hidden_layer=srhl_sinsq),
+                   GenELMClassifier(hidden_layer=srhl_tribas),
+                   GenELMClassifier(hidden_layer=srhl_hardlim),
+                   GenELMClassifier(hidden_layer=srhl_rbf)]
+
+    return names, classifiers
+
+
+def make_linearly_separable():
+    X, y = make_classification(n_samples=200, n_features=2, n_redundant=0,
+                               n_informative=2, random_state=1,
+                               n_clusters_per_class=1)
+    rng = np.random.RandomState(2)
+    X += 2 * rng.uniform(size=X.shape)
+    return (X, y)
+
+###############################################################################
+
+datasets = make_datasets()
+names, classifiers = make_classifiers()
+
+i = 1
+figure = pl.figure(figsize=(18, 9))
+
+# iterate over datasets
+for ds in datasets:
+    # preprocess dataset, split into training and test part
+    X, y = ds
+    X = StandardScaler().fit_transform(X)
+    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4,
+                                                        random_state=0)
+
+    x_min, x_max, y_min, y_max, xx, yy = get_data_bounds(X)
+
+    # plot dataset first
+    ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
+    plot_data(ax, X_train, y_train, X_test, y_test, xx, yy)
+
+    i += 1
+
+    # iterate over classifiers
+    for name, clf in zip(names, classifiers):
+        ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
+        clf.fit(X_train, y_train)
+        score = clf.score(X_test, y_test)
+
+        # Plot the decision boundary. For that, we will assign a color to each
+        # point in the mesh [x_min, x_max]x[y_min, y_max].
+        Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
+
+        # Put the result into a color plot
+        Z = Z.reshape(xx.shape)
+
+        plot_contour(ax, X_train, y_train, X_test, y_test, xx, yy, Z,
+                     name, score)
+
+        i += 1
+
+figure.subplots_adjust(left=.02, right=.98)
+pl.show()
+
diff --git a/random_layer.py b/random_layer.py
index 713a795..216788e 100644
--- a/random_layer.py
+++ b/random_layer.py
@@ -1,20 +1,3 @@
-#-*- coding: utf8
-# Author: David C. Lambert [dcl -at- panix -dot- com]
-# Copyright(c) 2013
-# License: Simple BSD
-
-"""The :mod:`random_layer` module
-implements Random Layer transformers.
-
-Random layers are arrays of hidden unit activations that are
-random functions of input activation values (dot products for simple
-activation functions, distances from prototypes for radial basis
-functions).
-
-They are used in the implementation of Extreme Learning Machines (ELMs),
-but can be used as a general input mapping.
-"""
-
 from abc import ABCMeta, abstractmethod
 
 from math import sqrt
 
@@ -24,7 +7,7 @@
 from scipy.spatial.distance import cdist, pdist, squareform
 
 from sklearn.metrics import pairwise_distances
-from sklearn.utils import check_random_state, atleast2d_or_csr
+from sklearn.utils import check_random_state, check_array
 from sklearn.utils.extmath import safe_sparse_dot
 
 from sklearn.base import BaseEstimator, TransformerMixin
 
@@ -95,20 +78,17 @@ def _compute_hidden_activations(self, X):
     # on the input array
     def fit(self, X, y=None):
         """Generate a random hidden layer.
-
         Parameters
         ----------
         X : {array-like, sparse matrix} of shape [n_samples, n_features]
             Training set: only the shape is used to generate random component
            values for hidden units
-
         y : is not used: placeholder to allow for usage in a Pipeline.
-
         Returns
         -------
         self
         """
-        X = atleast2d_or_csr(X)
+        X = check_array(X)
 
         self._generate_components(X)
 
@@ -118,19 +98,16 @@ def fit(self, X, y=None):
     # (which will normally call compute_input_activations first)
     def transform(self, X, y=None):
         """Generate the random hidden layer's activations given X as input.
-
         Parameters
         ----------
         X : {array-like, sparse matrix}, shape [n_samples, n_features]
             Data to transform
-
         y : is not used: placeholder to allow for usage in a Pipeline.
-
         Returns
         -------
         X_new : numpy array of shape [n_samples, n_components]
         """
-        X = atleast2d_or_csr(X)
+        X = check_array(X)
 
         if (self.components_ is None):
             raise ValueError('No components initialized')
 
@@ -142,43 +119,32 @@ class RandomLayer(BaseRandomLayer):
     """RandomLayer is a transformer that creates a feature mapping of the
     inputs that corresponds to a layer of hidden units with randomly
     generated components.
-
     The transformed values are a specified function of input activations
     that are a weighted combination of dot product (multilayer perceptron)
     and distance (rbf) activations:
-
       input_activation = alpha * mlp_activation + (1-alpha) * rbf_activation
-
       mlp_activation(x) = dot(x, weights) + bias
       rbf_activation(x) = rbf_width * ||x - center||/radius
-
     alpha and rbf_width are specified by the user
-
     weights and biases are taken from normal distribution of
     mean 0 and sd of 1
-
     centers are taken uniformly from the bounding hyperrectangle of the
     inputs, and radii are max(||x-c||)/sqrt(n_centers*2)
-
     The input activation is transformed by a transfer function that defaults
     to numpy.tanh if not specified, but can be any callable that returns an
     array of the same shape as its argument (the input activation array, of
     shape [n_samples, n_hidden]).  Functions provided are 'sine', 'tanh',
     'tribas', 'inv_tribas', 'sigmoid', 'hardlim', 'softlim', 'gaussian',
     'multiquadric', or 'inv_multiquadric'.
-
     Parameters
     ----------
     `n_hidden` : int, optional (default=20)
         Number of units to generate
-
     `alpha` : float, optional (default=0.5)
         Mixing coefficient for distance and dot product input activations:
         activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation
-
     `rbf_width` : float, optional (default=1.0)
         multiplier on rbf_activation
-
     `user_components`: dictionary, optional (default=None)
         dictionary containing values for components that would otherwise be
         randomly generated.  Valid key/value pairs are as follows:
@@ -186,33 +152,25 @@ class RandomLayer(BaseRandomLayer):
             'centers': array-like of shape [n_hidden, n_features]
             'biases' : array-like of shape [n_hidden]
             'weights': array-like of shape [n_features, n_hidden]
-
     `activation_func` : {callable, string} optional (default='tanh')
         Function used to transform input activation
-
         It must be one of 'tanh', 'sine', 'tribas', 'inv_tribas', 'sigmoid',
         'hardlim', 'softlim', 'gaussian', 'multiquadric', 'inv_multiquadric'
         or a callable.  If None is given, 'tanh' will be used.
-
         If a callable is given, it will be used to compute the activations.
-
     `activation_args` : dictionary, optional (default=None)
         Supplies keyword arguments for a callable activation_func
-
     `random_state` : int, RandomState instance or None (default=None)
         Control the pseudo random number generator used to generate the
         hidden unit weights at fit time.
-
     Attributes
     ----------
     `input_activations_` : numpy array of shape [n_samples, n_hidden]
         Array containing dot(x, hidden_weights) + bias for all samples
-
     `components_` : dictionary containing two keys:
         `bias_weights_`   : numpy array of shape [n_hidden]
         `hidden_weights_` : numpy array of shape [n_features, n_hidden]
-
     See Also
     --------
     """
@@ -424,60 +382,45 @@ def __init__(self, n_hidden=20, random_state=None,
 
 class GRBFRandomLayer(RBFRandomLayer):
     """Random Generalized RBF Hidden Layer transformer
-
     Creates a layer of radial basis function units where:
-
        f(a), s.t. a = ||x-c||/r
-
     with c the unit center
     and f() is exp(-gamma * a^tau) where tau and r are computed
     based on [1]
-
     Parameters
     ----------
     `n_hidden` : int, optional (default=20)
         Number of units to generate, ignored if centers are provided
-
     `grbf_lambda` : float, optional (default=0.05)
         GRBF shape parameter
-
     `gamma` : {int, float} optional (default=1.0)
         Width multiplier for GRBF distance argument
-
     `centers` : array of shape (n_hidden, n_features), optional (default=None)
         If provided, overrides internal computation of the centers
-
     `radii` : array of shape (n_hidden), optional (default=None)
         If provided, overrides internal computation of the radii
-
     `use_exemplars` : bool, optional (default=False)
         If True, uses random examples from the input to determine the RBF
         centers, ignored if centers are provided
-
     `random_state` : int or RandomState instance, optional (default=None)
         Control the pseudo random number generator used to generate the
         centers at fit time, ignored if centers are provided
-
     Attributes
     ----------
     `components_` : dictionary containing two keys:
         `radii_`   : numpy array of shape [n_hidden]
         `centers_` : numpy array of shape [n_hidden, n_features]
-
     `input_activations_` : numpy array of shape [n_samples, n_hidden]
         Array containing ||x-c||/r for all samples
-
     See Also
     --------
     ELMRegressor, ELMClassifier, SimpleELMRegressor, SimpleELMClassifier,
     SimpleRandomLayer
-
     References
     ----------
     .. [1] Fernandez-Navarro, et al, "MELM-GRBF: a modified version of the
            extreme learning machine for generalized radial basis function
            neural networks", Neurocomputing 74 (2011), 2502-2510
-
     """
 
     # def _grbf(acts, taus):
     #     """GRBF activation function"""
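
For reference, a minimal usage sketch of the classes this patch touches, assuming the GenELMClassifier, MLPRandomLayer and RBFRandomLayer interfaces exactly as exercised in plot_elm_comparision.py above. It is illustrative only and not part of the diff.

# Illustrative sketch -- assumes the elm/random_layer API shown in the patch above.
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

from elm import GenELMClassifier
from random_layer import MLPRandomLayer, RBFRandomLayer

# Small two-class toy problem, standardized as in the example script
X, y = make_moons(n_samples=200, noise=0.3, random_state=0)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,
                                                    random_state=0)

# 10 tanh hidden units; hidden->output weights fit by least squares (default)
clf_tanh = GenELMClassifier(hidden_layer=MLPRandomLayer(n_hidden=10,
                                                        activation_func='tanh'))
clf_tanh.fit(X_train, y_train)
print("tanh score: %.2f" % clf_tanh.score(X_test, y_test))

# 20 gaussian RBF units with rbf_width (gamma) = 0.1, as in the example script
clf_rbf = GenELMClassifier(hidden_layer=RBFRandomLayer(n_hidden=20,
                                                       rbf_width=0.1,
                                                       random_state=0))
clf_rbf.fit(X_train, y_train)
print("rbf  score: %.2f" % clf_rbf.score(X_test, y_test))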