@@ -6,6 +6,8 @@
 from sample_images import sample_images
 from display_network import display_network
 from sparse_autoencoder_cost import sparse_autoencoder_cost
+from check_numerical_gradient import check_numerical_gradient
+from compute_numerical_gradient import compute_numerical_gradient


 def initialize_parameters(hidden_size, visible_size):
@@ -43,8 +45,8 @@ def train():
     # display a random sample of 200 patches from the dataset

     patches = sample_images()
-    list = [randint(0, patches.shape[0]-1) for i in xrange(64)]
-    display_network(patches[list, :], 8)
+    # list = [randint(0, patches.shape[0]-1) for i in xrange(64)]
+    # display_network(patches[list, :], 8)

     # Obtain random parameters theta
     theta = initialize_parameters(hidden_size, visible_size)
@@ -77,7 +79,32 @@ def train():

     cost, grad = sparse_autoencoder_cost(theta, visible_size, hidden_size, decay_lambda, sparsity_param, beta, patches)

-
+    ## STEP 3: Gradient Checking
+    #
+    # Hint: If you are debugging your code, performing gradient checking on smaller models
+    # and smaller training sets (e.g., using only 10 training examples and 1-2 hidden
+    # units) may speed things up.
+
+    # First, let's make sure your numerical gradient computation is correct for a
+    # simple function. After you have implemented compute_numerical_gradient,
+    # run the following:
+    check_numerical_gradient()
+
+    # Now we can use it to check your cost function and derivative calculations
+    # for the sparse autoencoder.
+    func = lambda x: sparse_autoencoder_cost(x, visible_size, hidden_size,
+                                             decay_lambda, sparsity_param, beta, patches)
+    numgrad = compute_numerical_gradient(func, theta)
+
+    # Use this to visually compare the gradients side by side
+    print numgrad, grad
+
+    # Compare numerically computed gradients with the ones obtained from backpropagation
+    diff = np.linalg.norm(numgrad - grad) / np.linalg.norm(numgrad + grad)
+    # Should be small. In our implementation, these values are usually less than 1e-9.
+    print diff
+
+    # When you get this working, congratulations!


 if __name__ == "__main__":
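Note that `compute_numerical_gradient` itself is only imported here, not shown. A minimal centered-difference sketch of what it is expected to do, assuming `theta` is a flat 1-D numpy array, that `func(theta)` returns the cost as its first return value (as the lambda in `train()` does), and an epsilon of 1e-4; the names and the constant are illustrative, not the module's actual contents:

```python
import numpy as np

EPSILON = 1e-4  # perturbation size; an assumed value, not taken from the module


def compute_numerical_gradient(func, theta):
    """Centered-difference approximation of the gradient of func at theta.

    Assumes func(theta) returns the cost as its first return value, the way
    sparse_autoencoder_cost does in train() above.
    """
    numgrad = np.zeros_like(theta)
    perturb = np.zeros_like(theta)
    for i in range(theta.size):
        # Perturb one coordinate at a time: (J(theta+e) - J(theta-e)) / (2e)
        perturb[i] = EPSILON
        cost_plus = func(theta + perturb)[0]
        cost_minus = func(theta - perturb)[0]
        numgrad[i] = (cost_plus - cost_minus) / (2.0 * EPSILON)
        perturb[i] = 0.0
    return numgrad
```

Centered differences are preferred here because their approximation error falls off as O(epsilon^2), versus O(epsilon) for a one-sided difference.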
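Similarly, `check_numerical_gradient` is only imported; its job is to validate the numerical gradient against a function whose derivative is known in closed form. A sketch in the spirit of the UFLDL exercise's check (the quadratic h(x) = x1^2 + 3*x1*x2 and the evaluation point [4, 10] follow the original exercise; treat the exact contents as an assumption):

```python
import numpy as np

from compute_numerical_gradient import compute_numerical_gradient


def simple_quadratic(x):
    # h(x) = x1^2 + 3*x1*x2, whose gradient is (2*x1 + 3*x2, 3*x1)
    cost = x[0] ** 2 + 3.0 * x[0] * x[1]
    grad = np.array([2.0 * x[0] + 3.0 * x[1], 3.0 * x[0]])
    return cost, grad


def check_numerical_gradient():
    x = np.array([4.0, 10.0])
    _, grad = simple_quadratic(x)
    numgrad = compute_numerical_gradient(simple_quadratic, x)
    # Same relative-difference measure used in train(); should be ~1e-9 or less
    diff = np.linalg.norm(numgrad - grad) / np.linalg.norm(numgrad + grad)
    print numgrad, grad
    print diff
```

If this check passes but the autoencoder check in `train()` does not, the bug is in `sparse_autoencoder_cost`'s gradient, not in the numerical approximation.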