
Commit 07745a0

Removed regularization from network_basic

1 parent: 156673a

1 file changed

code/network_basic.py (5 additions, 11 deletions)
```diff
@@ -42,7 +42,7 @@ def feedforward(self, a):
         return a
 
     def SGD(self, training_data, epochs, mini_batch_size, eta,
-            lmbda, test_data=None):
+            test_data=None):
         """Train the neural network using mini-batch stochastic
         gradient descent. The ``training_data`` is a list of tuples
         ``(x, y)`` representing the training inputs and the desired
@@ -59,14 +59,14 @@ def SGD(self, training_data, epochs, mini_batch_size, eta,
                 training_data[k:k+mini_batch_size]
                 for k in xrange(0, n, mini_batch_size)]
             for mini_batch in mini_batches:
-                self.backprop(mini_batch, n, eta, lmbda)
+                self.backprop(mini_batch, n, eta)
             if test_data:
                 print "Epoch {}: {} / {}".format(
                     j, self.evaluate(test_data), n_test)
             else:
                 print "Epoch %s complete" % j
 
-    def backprop(self, training_data, n, eta, lmbda):
+    def backprop(self, training_data, n, eta):
         """Update the network's weights and biases by applying a
         single iteration of gradient descent using backpropagation.
         The ``training_data`` is a list of tuples ``(x, y)``. It need
@@ -77,7 +77,6 @@ def backprop(self, training_data, n, eta, lmbda):
         self-explanatory."""
         nabla_b = [np.zeros(b.shape) for b in self.biases]
         nabla_w = [np.zeros(w.shape) for w in self.weights]
-        B = len(training_data)
         for x, y in training_data:
             # feedforward
             activation = x
@@ -105,8 +104,6 @@ def backprop(self, training_data, n, eta, lmbda):
             delta = np.dot(self.weights[-l+1].transpose(), delta) * spv
             nabla_b[-l] += delta
             nabla_w[-l] += np.dot(delta, activations[-l-1].transpose())
-        # Add the regularization terms to the gradient for the weights
-        nabla_w = [nw+(lmbda*B/n)*w for nw, w in zip(nabla_w, self.weights)]
         self.weights = [w-eta*nw for w, nw in zip(self.weights, nabla_w)]
         self.biases = [b-eta*nb for b, nb in zip(self.biases, nabla_b)]
 
@@ -121,15 +118,12 @@ def evaluate(self, test_data):
 
     def cost(self, x, y):
         """Return the quadratic cost associated to the network, with
-        input ``x`` and desired output ``y``. Note that there is no
-        regularization."""
+        input ``x`` and desired output ``y``."""
        return np.sum((self.feedforward(x)-y)**2)/2.0
 
     def cost_derivative(self, output_activations, y):
         """Return the vector of partial derivatives \partial C_x /
-        \partial a for the output activations, ``a``. For the
-        unregularized quadratic cost this is just the difference
-        between the output activations and the desired output, ``y``."""
+        \partial a for the output activations, ``a``."""
         return (output_activations-y)
 
 #### Miscellaneous functions
```
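For context, the removed `lmbda`/`B` machinery added an L2 weight-decay term, `(lmbda*B/n)*w`, to the accumulated mini-batch weight gradients just before the update step; the bias update was unaffected. The sketch below is a minimal, standalone NumPy illustration (hypothetical helper names, not part of `network_basic.py`) contrasting the plain update the file keeps after this commit with the regularized update it removes.

```python
import numpy as np

# Minimal sketch with hypothetical names; not part of network_basic.py.
# weights / nabla_w: lists of weight matrices and their accumulated
# mini-batch gradients; eta: learning rate; lmbda: L2 strength;
# B: mini-batch size; n: total number of training examples.

def plain_update(weights, nabla_w, eta):
    # Unregularized step, as network_basic.py performs after this commit.
    return [w - eta * nw for w, nw in zip(weights, nabla_w)]

def l2_update(weights, nabla_w, eta, lmbda, B, n):
    # Step including the (lmbda*B/n)*w weight-decay term this commit removes.
    nabla_w = [nw + (lmbda * B / n) * w for nw, w in zip(nabla_w, weights)]
    return [w - eta * nw for w, nw in zip(weights, nabla_w)]

# Example usage on toy shapes:
weights = [np.random.randn(3, 2), np.random.randn(1, 3)]
grads = [np.ones((3, 2)), np.ones((1, 3))]
new_w = l2_update(weights, grads, eta=0.5, lmbda=0.1, B=10, n=1000)
```

With `lmbda = 0` the two functions produce the same step, which is effectively what dropping the argument achieves.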
