@@ -53,13 +53,13 @@ def SGD(self, training_data, epochs, mini_batch_size, eta,
         but slows things down substantially. If ``test`` is set, then
         appropriate ``test_data`` must be supplied.
         """
-        if test: n_test = len(test_inputs)
+        if test: n_test = len(test_data)
         n = len(training_data)
         for j in xrange(epochs):
             random.shuffle(training_data)
             mini_batches = [
                 training_data[k:k + mini_batch_size]
-                for k in xrange(0, len(training_data), mini_batch_size)]
+                for k in xrange(0, n, mini_batch_size)]
             for mini_batch in mini_batches:
                 self.backprop(mini_batch, n, eta, lmbda)
             if test:
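For reference, a minimal standalone sketch of the mini-batch slicing this hunk touches. The toy data is hypothetical, and ``range`` stands in for the Python 2 ``xrange`` used above so the snippet runs on Python 3:

    import random

    # Hypothetical toy training set: ten (input, label) pairs.
    training_data = [(x, x % 2) for x in range(10)]
    mini_batch_size = 3

    random.shuffle(training_data)
    n = len(training_data)
    # Same slicing as in the hunk: the final batch is smaller than
    # mini_batch_size whenever n is not a multiple of it.
    mini_batches = [training_data[k:k + mini_batch_size]
                    for k in range(0, n, mini_batch_size)]
    print([len(b) for b in mini_batches])  # [3, 3, 3, 1]

Reusing the precomputed ``n`` in the slice bounds is equivalent to calling ``len(training_data)`` again; the change just avoids the redundant call.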
@@ -117,9 +117,9 @@ def evaluate(self, test_data):
         network outputs the correct result. Note that the neural
         network's output is assumed to be the index of whichever
         neuron in the final layer has the highest activation."""
-        test_results = [np.argmax(self.feedforward(x)) for x in test_data[0]]
-        return sum(int(x == y)
-                   for x, y in zip(test_results, test_data[1]))
+        test_results = [(np.argmax(self.feedforward(x)), y)
+                        for (x, y) in test_data]
+        return sum(int(x == y) for (x, y) in test_results)
 
     def cost(self, x, y):
         """Return the quadratic cost associated to the network, with
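This hunk changes the expected shape of ``test_data`` from a pair of parallel arrays to a list of ``(input, label)`` tuples. A minimal sketch of the new counting logic, with a stub ``feedforward`` standing in for the network's forward pass (the stub and its data are assumptions for illustration only):

    import numpy as np

    def feedforward(x):
        # Stub: returns activations that peak at index x % 3.
        # A real Network would run an actual forward pass here.
        a = np.zeros(3)
        a[x % 3] = 1.0
        return a

    # test_data as a list of (input, label) tuples, matching the
    # rewritten evaluate().
    test_data = [(0, 0), (1, 1), (2, 2), (4, 2)]
    test_results = [(np.argmax(feedforward(x)), y) for (x, y) in test_data]
    print(sum(int(x == y) for (x, y) in test_results))  # 3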