1.1.1 Perceptron and Adaline
Valerio Maggio
Sections
Implementing a perceptron learning algorithm in Python
Training a perceptron model on the Iris dataset
Adaptive linear neurons and the convergence of learning
Implementing an adaptive linear neuron in Python
Large scale machine learning and stochastic gradient descent
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
Implementing a perceptron learning algorithm in Python
[back to top]
class Perceptron(object):
    """Perceptron classifier.
    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.
    Attributes
    -----------
    w_ : 1d-array
        Weights after fitting.
    errors_ : list
        Number of misclassifications in every epoch.
    """
    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit training data.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors.
        y : array-like, shape = [n_samples]
            Target values.
        Returns
        -------
        self : object
        """
        self.w_ = np.zeros(1 + X.shape[1])
        self.errors_ = []
        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                # perceptron rule: update only when the sample is misclassified
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    def net_input(self, X):
        """Calculate net input"""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        """Return class label after unit step"""
        return np.where(self.net_input(X) >= 0.0, 1, -1)
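As a quick sanity check (not part of the original notebook), the classifier can be fit on a tiny, linearly separable toy set; the data values here are made up for illustration:

# Hypothetical smoke test: four linearly separable points labelled -1 / 1.
X_toy = np.array([[0.0, 0.0], [1.0, 0.0], [3.0, 3.0], [4.0, 3.0]])
y_toy = np.array([-1, -1, 1, 1])

ppn_toy = Perceptron(eta=0.1, n_iter=10).fit(X_toy, y_toy)
print(ppn_toy.errors_)          # misclassifications per epoch; reaches 0
print(ppn_toy.predict(X_toy))   # expected: [-1 -1  1  1]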
Training a perceptron model on the Iris dataset
[back to top]
from sklearn.datasets import load_iris
import pandas as pd

iris = load_iris()
X = iris.data
y = iris.target
labels = iris.target_names
features = iris.feature_names

# stack features and labels into a single table
data = np.hstack((X, y[:, np.newaxis]))
df = pd.DataFrame(data, columns=iris.feature_names + ['label'])
df.tail()

# select setosa and versicolor, using sepal length and petal length only
y = df.iloc[0:100, 4].values
y = np.where(y == 0, -1, 1)          # setosa -> -1, versicolor -> 1
X = df.iloc[0:100, [0, 2]].values

# plot data
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
plt.show()
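As an aside, the same subset can be selected without the intermediate DataFrame; a minimal sketch (the _alt names are illustrative, not from the notebook):

# Equivalent NumPy-only selection: in load_iris ordering the first 100
# samples are setosa (class 0) followed by versicolor (class 1).
X_alt = iris.data[:100][:, [0, 2]]
y_alt = np.where(iris.target[:100] == 0, -1, 1)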
Now train the perceptron on the reduced Iris data and plot the misclassification errors per epoch:

ppn = Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)

plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of misclassifications')
plt.tight_layout()
plt.show()
from matplotlib.colors import ListedColormap

def plot_decision_regions(X, y, classifier, resolution=0.02):
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # plot the decision surface on a grid spanning the data
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # plot class samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)

plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
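Once trained, the same model can of course classify new measurements directly; the sample values below are made up for illustration:

# hypothetical new flower: sepal length 5.0 cm, petal length 1.5 cm
print(ppn.predict(np.array([[5.0, 1.5]])))   # -1 -> setosa, 1 -> versicolor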
Adaptive linear neurons and the convergence of learning
[back to top]
In Adaline (ADAptive LInear NEuron), the weights are updated by minimising a continuous cost function, the sum of squared errors between the true labels and the linear activations, via gradient descent, rather than by reacting to individual misclassifications as the perceptron rule does.
Implementing an adaptive linear neuron in Python
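For reference, the cost function being minimised and the resulting batch update are the standard Adaline ones, consistent with the fit method below:

$$ J(\mathbf{w}) = \frac{1}{2}\sum_i \big(y^{(i)} - \phi(z^{(i)})\big)^2, \qquad \Delta\mathbf{w} = \eta \sum_i \big(y^{(i)} - \phi(z^{(i)})\big)\, \mathbf{x}^{(i)} $$

where the activation $\phi(z) = z = \mathbf{w}^T\mathbf{x}$ is simply the identity; the unit-step quantizer is applied only for the final class prediction.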
class AdalineGD(object):
    """ADAptive LInear NEuron classifier.
    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.
    Attributes
    -----------
    w_ : 1d-array
        Weights after fitting.
    cost_ : list
        Sum-of-squares cost value in each epoch.
    """
    def __init__(self, eta=0.01, n_iter=50):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit training data.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors.
        y : array-like, shape = [n_samples]
            Target values.
        Returns
        -------
        self : object
        """
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            output = self.net_input(X)
            errors = (y - output)
            # batch gradient-descent step over all samples at once
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors ** 2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        """Calculate net input"""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Compute linear activation"""
        return self.net_input(X)

    def predict(self, X):
        """Return class label after unit step"""
        return np.where(self.activation(X) >= 0.0, 1, -1)
A learning rate that is too large (eta = 0.01) makes the sum-of-squared-errors diverge, while a very small one (eta = 0.0001) converges only slowly:

fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
ada1 = AdalineGD(n_iter=10, eta=0.01).fit(X, y)
ax[0].plot(range(1, len(ada1.cost_) + 1), np.log10(ada1.cost_), marker='o')
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('log(Sum-squared-error)')
ada2 = AdalineGD(n_iter=10, eta=0.0001).fit(X, y)
ax[1].plot(range(1, len(ada2.cost_) + 1), ada2.cost_, marker='o')
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Sum-squared-error')
plt.tight_layout()
plt.show()
Standardizing the features (zero mean, unit variance) lets gradient descent converge with the larger learning rate:

X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()

ada = AdalineGD(n_iter=15, eta=0.01)
ada.fit(X_std, y)

plot_decision_regions(X_std, y, classifier=ada)
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()

plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Sum-squared-error')
plt.tight_layout()
plt.show()
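The manual standardization above is equivalent to scikit-learn's StandardScaler; a minimal sketch of the same transform:

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_std_sk = scaler.fit_transform(X)  # zero mean, unit variance per feature
# X_std_sk matches X_std up to floating-point error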
Large scale machine learning and stochastic gradient descent
[back to top]
Instead of computing the gradient from the entire training set, stochastic gradient descent updates the weights incrementally after each training sample; the AdalineSGD variant below also supports online learning through partial_fit, as sketched after the class.
from numpy.random import seed

class AdalineSGD(object):
    """ADAptive LInear NEuron classifier, trained via stochastic gradient descent.
    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.
    shuffle : bool (default: True)
        Shuffles training data every epoch if True to prevent cycles.
    random_state : int (default: None)
        Set random state for shuffling and initializing the weights.
    Attributes
    -----------
    w_ : 1d-array
        Weights after fitting.
    cost_ : list
        Average cost over the training samples in each epoch.
    """
    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.w_initialized = False
        self.shuffle = shuffle
        if random_state:
            seed(random_state)

    def fit(self, X, y):
        """Fit training data.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors.
        y : array-like, shape = [n_samples]
            Target values.
        Returns
        -------
        self : object
        """
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for i in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            for xi, target in zip(X, y):
                cost.append(self._update_weights(xi, target))
            avg_cost = sum(cost) / len(y)
            self.cost_.append(avg_cost)
        return self

    def partial_fit(self, X, y):
        """Fit training data without reinitializing the weights."""
        if not self.w_initialized:
            self._initialize_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weights(xi, target)
        else:
            self._update_weights(X, y)
        return self

    def _shuffle(self, X, y):
        """Shuffle training data."""
        r = np.random.permutation(len(y))
        return X[r], y[r]

    def _initialize_weights(self, m):
        """Initialize weights to zeros."""
        self.w_ = np.zeros(1 + m)
        self.w_initialized = True

    def _update_weights(self, xi, target):
        """Apply the Adaline learning rule to a single sample."""
        output = self.net_input(xi)
        error = (target - output)
        self.w_[1:] += self.eta * xi.dot(error)
        self.w_[0] += self.eta * error
        cost = 0.5 * error ** 2
        return cost

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        return self.net_input(X)

    def predict(self, X):
        """Return class label after unit step."""
        return np.where(self.activation(X) >= 0.0, 1, -1)
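Because the weights are updated one sample at a time, the class supports online learning: after an initial fit, partial_fit refines the weights on new data without reinitializing them. A minimal sketch (the "newly arrived" sample below is hypothetical, reusing a training point):

ada_online = AdalineSGD(n_iter=15, eta=0.01, random_state=1).fit(X_std, y)
# pretend X_std[0, :] just arrived from a data stream; weights are kept
ada_online.partial_fit(X_std[0, :], y[0])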
Training the SGD variant on the standardized data yields a comparable decision boundary:

ada = AdalineSGD(n_iter=15, eta=0.01, random_state=1)
ada.fit(X_std, y)

plot_decision_regions(X_std, y, classifier=ada)
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
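The excerpt ends here; to check convergence of the stochastic variant, the average cost per epoch can be plotted the same way as the earlier cost curves (a sketch following that pattern, not part of the excerpt):

plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Average Cost')
plt.tight_layout()
plt.show()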