@@ -93,11 +93,11 @@ def backprop(self, training_data, n, eta):
             nabla_b[-1] += delta
             nabla_w[-1] += np.dot(delta, activations[-2].transpose())
             # Note that the variable l in the loop below is used a
-            # little differently to the book.  Here, l = 1 means the
-            # last layer of neurons, l = 2 is the second-last layer,
-            # and so on.  It's a renumbering of the scheme used in the
-            # book, used to take advantage of the fact that Python can
-            # use negative indices in lists.
+            # little differently to the notation in Chapter 2 of the book.
+            # Here, l = 1 means the last layer of neurons, l = 2 is the
+            # second-last layer, and so on.  It's a renumbering of the
+            # scheme used in the book, used here to take advantage of the
+            # fact that Python can use negative indices in lists.
             for l in xrange(2, self.num_layers):
                 z = zs[-l]
                 spv = sigmoid_prime_vec(z)
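The renumbered loop index leans on Python's negative list indexing: zs[-1] is the weighted input of the last layer, zs[-2] of the second-last, and so on. Since l = 1 (the output layer) is handled before the loop via nabla_b[-1] and nabla_w[-1], the loop starts at 2. A minimal standalone sketch of the mapping (not part of network.py; the layer labels are only illustrative):

    # Sketch: how the renumbered index l maps onto negative indexing.
    # 'zs' stands in for the per-layer weighted inputs of a network with
    # one input layer plus three layers that have weights.
    zs = ["z for layer 1", "z for layer 2", "z for layer 3"]
    num_layers = 4
    for l in range(2, num_layers):
        print(zs[-l])   # l = 2 -> "z for layer 2", l = 3 -> "z for layer 1"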
@@ -116,11 +116,6 @@ def evaluate(self, test_data):
                         for (x, y) in test_data]
         return sum(int(x == y) for (x, y) in test_results)
 
-    def cost(self, x, y):
-        """Return the quadratic cost associated to the network, with
-        input ``x`` and desired output ``y``."""
-        return np.sum((self.feedforward(x)-y)**2)/2.0
-
     def cost_derivative(self, output_activations, y):
         """Return the vector of partial derivatives \partial C_x /
         \partial a for the output activations, ``a``."""
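The removed cost method computed the quadratic cost C_x = sum((a - y)**2) / 2 for a single input, and cost_derivative supplies the matching gradient with respect to the output activations: for that cost, \partial C_x / \partial a is simply (a - y). A small standalone sketch of that relationship (the helper name and sample values are illustrative, not taken from this commit):

    import numpy as np

    # Sketch: for the quadratic cost C_x = ||a - y||^2 / 2 (the form the
    # removed cost method computed), the vector of partials dC_x/da is a - y.
    def quadratic_cost_derivative(output_activations, y):
        return output_activations - y

    a = np.array([0.2, 0.9])
    y = np.array([0.0, 1.0])
    print(quadratic_cost_derivative(a, y))   # -> [ 0.2 -0.1]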