
Commit 0090ef9

removed sklearn refs from docstrings
reinstated direct use of the pseudo-inverse when a regressor is not provided
Parent: 4dedcb6 · Commit: 0090ef9

3 files changed: 8 additions & 12 deletions

elm.py

Lines changed: 6 additions & 7 deletions
@@ -4,7 +4,7 @@
 # License: Simple BSD
 
 """
-The :mod:`sklearn.neural_networks.elm` module implements the
+The :mod:`elm` module implements the
 Extreme Learning Machine Classifiers and Regressors (ELMClassifier,
 ELMRegressor, SimpleELMRegressor, SimpleELMClassifier).
 
@@ -24,11 +24,12 @@
 from abc import ABCMeta, abstractmethod
 
 import numpy as np
+from scipy.linalg import pinv2
 
 from sklearn.utils import as_float_array
+from sklearn.utils.extmath import safe_sparse_dot
 from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
 from sklearn.preprocessing import LabelBinarizer
-from sklearn.linear_model import LinearRegression
 
 from random_hidden_layer import SimpleRandomHiddenLayer
 
@@ -140,19 +141,17 @@ def __init__(self,
 
         super(ELMRegressor, self).__init__(hidden_layer, regressor)
 
+        self.coefs_ = None
         self.fitted_ = False
         self.hidden_activations_ = None
-        self._lin_reg = LinearRegression(copy_X=False,
-                                         normalize=False,
-                                         fit_intercept=False)
 
     def _fit_regression(self, y):
         """
         fit regression using internal linear regression
         or supplied regressor
         """
         if (self.regressor is None):
-            self._lin_reg.fit(self.hidden_activations_, y)
+            self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
         else:
             self.regressor.fit(self.hidden_activations_, y)
 
@@ -189,7 +188,7 @@ def fit(self, X, y):
     def _get_predictions(self, X):
         """get predictions using internal least squares/supplied regressor"""
         if (self.regressor is None):
-            preds = self._lin_reg.predict(self.hidden_activations_)
+            preds = safe_sparse_dot(self.hidden_activations_, self.coefs_)
         else:
             preds = self.regressor.predict(self.hidden_activations_)

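For context, here is a minimal standalone sketch of the least-squares step this commit reinstates: when no regressor is supplied, the output weights are obtained by applying the Moore-Penrose pseudo-inverse of the hidden-layer activations to the targets, and predictions are then a plain matrix product. The helper names and toy data below are illustrative only, not part of the module; pinv2 matches the import in the diff, while newer SciPy releases expose the same functionality as scipy.linalg.pinv.

# Illustrative sketch, not code from elm.py
import numpy as np
from scipy.linalg import pinv2                  # on newer SciPy: scipy.linalg.pinv
from sklearn.utils.extmath import safe_sparse_dot

def fit_output_weights(H, y):
    # minimum-norm least-squares solution of H @ coefs ~ y via the pseudo-inverse
    return safe_sparse_dot(pinv2(H), y)

def predict_from_activations(H, coefs):
    # predictions are the hidden activations times the fitted weights
    return safe_sparse_dot(H, coefs)

rng = np.random.RandomState(0)
H = rng.randn(100, 20)      # hidden-layer activations (n_samples x n_hidden)
y = rng.randn(100)          # regression targets
coefs = fit_output_weights(H, y)
preds = predict_from_activations(H, coefs)

Storing the weights directly in coefs_ removes the LinearRegression dependency for the default (regressor is None) path, which is what the deleted lines above reflect.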
elm_notebook.py

Lines changed: 1 addition & 4 deletions
@@ -3,7 +3,7 @@
 
 # <codecell>
 
-# Demo python notebook for sklearn elm and random_hidden_layer modules
+# Demo python notebook for elm and random_hidden_layer modules
 #
 # Author: David C. Lambert [dcl -at- panix -dot- com]
 # Copyright(c) 2013
@@ -210,6 +210,3 @@ def powtanh_xfer(activations, power=1.0):
 print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
 plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
 
-# <codecell>
-
-

random_hidden_layer.py

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 # Copyright(c) 2013
 # License: Simple BSD
 
-"""The :mod:`sklearn.neural_networks.random_hidden_layer` module
+"""The :mod:`random_hidden_layer` module
 implements Random Hidden Layer transformers.
 
 Random hidden layers are arrays of hidden unit activations that are
