Commit 70c60bf

Refactoring and consolidating the feedforward methods
1 parent 53a3e68 commit 70c60bf

File tree

2 files changed (+19, -35 lines)

code/backprop2.py

Lines changed: 16 additions & 33 deletions
@@ -2,13 +2,14 @@
 backprop2
 ~~~~~~~~~
 
-This is a minor variation on the backprop module. The most
-significant changes are: (1) It uses the cross-entropy cost by
-default, instead of the quadratic cost; and (2) The weights and bias
-for each neuron are initialized as a Gaussian random variable whose
-mean is zero and standard deviation is one over the square-root of the
-neuron's fan-in (instead of having mean zero and standard deviation
-one).
+This is a minor variation on the backprop module. The main changes
+are: (1) It uses the cross-entropy cost by default, instead of the
+quadratic cost; (2) The weights and bias for each neuron are
+initialized as a Gaussian random variable whose mean is zero and
+standard deviation is one over the square-root of the neuron's fan-in
+(instead of having mean zero and standard deviation one); and (3) The
+feedforward method has been generalized to make it easier to propagate
+activations through part of the network.
 """
 
 #### Libraries
@@ -37,9 +38,14 @@ def __init__(self, sizes):
         self.weights = [np.random.randn(y, x)/np.sqrt(y)
                         for x, y in zip(sizes[:-1], sizes[1:])]
 
-    def feedforward(self, a):
-        "Return the output of the network if ``a`` is input."
-        for b, w in zip(self.biases, self.weights):
+    def feedforward(self, a, start=0, end=None):
+        """Return the result from feeding forward the activation ``a``
+        from layer ``start`` through to layer ``end``. Note that if
+        ``end`` is ``None`` then this is interpreted as ``end =
+        self.num_layers``, i.e., the default behaviour is to propagate
+        through to the end of the network."""
+        if end is None: end = self.num_layers
+        for b, w in zip(self.biases, self.weights)[start:end]:
             a = sigmoid_vec(np.dot(w, a)+b)
         return a
 
@@ -167,29 +173,6 @@ def evaluate_training_results(self, training_data):
         return sum(int(x == y)
                    for x, y in zip(training_results, actual_training_results))
 
-    def initial_feedforward(self, input_data, j):
-        """
-        Feedforward the elements ``x`` in the list ``input_data``
-        through the network until the ``j``th layer. Return the list
-        of activations from the ``j``th layer.
-        """
-        for k in range(j):
-            intermediate_data = [
-                sigmoid_vec(np.dot(self.weights[k], x)+self.biases[k])
-                for x in input_data]
-        return intermediate_data
-
-    def final_feedforward(self, intermediate_data, j):
-        """
-        Feedforward the elements ``x`` in the list
-        ``intermediate_data`` through the network to the output. The
-        elements in ``intermediate_data`` are assumed to be inputs to
-        the ``j``th layer."""
-        for k in range(j, len(self.weights)):
-            output_data = [
-                sigmoid_vec(np.dot(self.weights[k], a)+self.biases[k])
-                for a in intermediate_data]
-        return output_data
 
 #### Miscellaneous functions
 def minimal_cross_entropy(training_data):
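For reference, here is a minimal usage sketch of the generalized feedforward method. It is not part of the commit: the class name ``Network``, the layer sizes, and the input are assumptions for illustration; only the ``feedforward(a, start, end)`` signature and ``num_layers`` attribute come from the diff above.

    # Illustrative sketch (assumed class name and sizes), Python 2 style
    # to match the module's use of zip(...)[start:end].
    import numpy as np
    import backprop2  # assumes backprop2.py defines a class named Network

    net = backprop2.Network([784, 30, 10])   # hypothetical layer sizes
    x = np.random.randn(784, 1)              # hypothetical input activation

    hidden = net.feedforward(x, start=0, end=1)                    # activation of layer 1 only
    output = net.feedforward(hidden, start=1, end=net.num_layers)  # continue from layer 1 to the output
    full = net.feedforward(x)                                      # default: whole network
    assert np.allclose(full, output)         # the two-step and one-step paths agree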

code/deep_autoencoder.py

Lines changed: 3 additions & 2 deletions
@@ -100,7 +100,8 @@ def train_nested_autoencoder_repl(
         data for the first layer of the network, and is a list of
         entries ``x``."""
         self.train_nested_autoencoder(
-            j, double(self.initial_feedforward(training_data, j)),
+            j, double(
+                [self.feedforward(x, start=0, end=j) for x in training_data]),
             epochs, mini_batch_size, eta, lmbda)
 
     def feature(self, j, k):
@@ -109,7 +110,7 @@ def feature(self, j, k):
         activated, and all others are not active. """
         a = np.zeros((self.sizes[j], 1))
         a[k] = 1.0
-        return self.final_feedforward([a], j)[0]
+        return self.feedforward(a, start=j, end=self.num_layers)
 
 def double(l):
     return [(x, x) for x in l]
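The call-site changes above amount to the translation pattern sketched below. This is an illustrative summary rather than code from the commit, and ``net``, ``training_data``, ``intermediate_data``, and ``j`` are placeholder names.

    # Mapping from the removed helpers to the consolidated method.  The old
    # helpers operated on lists of activations; the new method operates on a
    # single activation, so list handling moves into a comprehension at the
    # call site.
    #
    #   net.initial_feedforward(training_data, j)
    #       -> [net.feedforward(x, start=0, end=j) for x in training_data]
    #
    #   net.final_feedforward(intermediate_data, j)
    #       -> [net.feedforward(a, start=j, end=net.num_layers) for a in intermediate_data]

Consolidating the two helpers into one method keeps the layer-range logic in a single place and lets each caller decide whether it is working with a single activation or a list of them.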
