
Commit d0b94c5

fix some spacing
1 parent 476c892 commit d0b94c5

File tree

5 files changed: 0 additions & 11 deletions


README.md

Lines changed: 0 additions & 4 deletions
@@ -230,9 +230,6 @@ for t in range(500):
 import torch
 from torch.autograd import Variable

-
-
-
 class MyReLU(torch.autograd.Function):
     """
     We can implement our own custom autograd Functions by subclassing

@@ -391,7 +388,6 @@ with tf.Session() as sess:
 import torch
 from torch.autograd import Variable

-
 # N is batch size; D_in is input dimension;
 # H is hidden dimension; D_out is output dimension.
 N, D_in, H, D_out = 64, 1000, 100, 10
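For context on the second hunk: N, D_in, H, and D_out are the batch size and layer sizes used throughout these examples. A minimal sketch of what that setup produces (using plain tensors rather than the Variable wrapper the snippet imports) might be:

import torch

# Dimensions named in the hunk above:
# N = batch size, D_in = input size, H = hidden size, D_out = output size.
N, D_in, H, D_out = 64, 1000, 100, 10

# Random input and target data with matching shapes.
x = torch.randn(N, D_in)   # 64 examples, each a 1000-dimensional vector
y = torch.randn(N, D_out)  # 64 targets, each a 10-dimensional vector

print(x.shape, y.shape)    # torch.Size([64, 1000]) torch.Size([64, 10])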

autograd/two_layer_net_custom_function.py

Lines changed: 0 additions & 3 deletions
@@ -1,8 +1,6 @@
 import torch
 from torch.autograd import Variable

-
-
 """
 A fully-connected ReLU network with one hidden layer and no biases, trained to
 predict y from x by minimizing squared Euclidean distance.

@@ -14,7 +12,6 @@
 the ReLU function.
 """

-
 class MyReLU(torch.autograd.Function):
     """
     We can implement our own custom autograd Functions by subclassing
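The docstring in this hunk refers to subclassing torch.autograd.Function to define a custom forward and backward pass. A rough sketch of that pattern, written against the current static-method API rather than the older Variable-era style this file uses:

import torch

class MyReLU(torch.autograd.Function):
    # A custom autograd Function defines its own forward and backward passes.
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)   # stash the input for use in backward
        return x.clamp(min=0)      # ReLU: elementwise max(x, 0)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[x < 0] = 0      # no gradient where the input was negative
        return grad_input

# Apply the Function via .apply so autograd records it in the graph.
x = torch.randn(5, requires_grad=True)
y = MyReLU.apply(x)
y.sum().backward()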

nn/dynamic_net.py

Lines changed: 0 additions & 1 deletion
@@ -68,4 +68,3 @@ def forward(self, x):
     optimizer.zero_grad()
     loss.backward()
     optimizer.step()
-

nn/two_layer_net_nn.py

Lines changed: 0 additions & 1 deletion
@@ -1,7 +1,6 @@
 import torch
 from torch.autograd import Variable

-
 """
 A fully-connected ReLU network with one hidden layer, trained to predict y from x
 by minimizing squared Euclidean distance.
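The file this hunk touches builds that two-layer ReLU network with the torch.nn package. A minimal sketch of the idea (not this file's exact code, which predates the removal of Variable):

import torch

N, D_in, H, D_out = 64, 1000, 100, 10

# Two-layer fully-connected ReLU network expressed as a sequence of Modules.
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out),
)

# Squared Euclidean distance between predictions and targets.
loss_fn = torch.nn.MSELoss(reduction='sum')

x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
loss = loss_fn(model(x), y)   # forward pass and loss in one line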

nn/two_layer_net_optim.py

Lines changed: 0 additions & 2 deletions
@@ -1,7 +1,6 @@
 import torch
 from torch.autograd import Variable

-
 """
 A fully-connected ReLU network with one hidden layer, trained to predict y from x
 by minimizing squared Euclidean distance.

@@ -54,4 +53,3 @@

 # Calling the step function on an Optimizer makes an update to its parameters
 optimizer.step()
-
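The comment preserved by the second hunk summarizes the optimizer workflow: build an Optimizer over the model's parameters, then call zero_grad(), backward(), and step() on each iteration. A rough sketch of that loop, with a stand-in model and an assumed Adam optimizer and learning rate (not necessarily this file's configuration):

import torch

model = torch.nn.Linear(10, 1)                              # stand-in model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)   # assumed optimizer/lr

x, y = torch.randn(8, 10), torch.randn(8, 1)
for t in range(100):
    loss = torch.nn.functional.mse_loss(model(x), y)
    optimizer.zero_grad()   # clear gradients from the previous iteration
    loss.backward()         # compute gradients of the loss w.r.t. parameters
    optimizer.step()        # update the parameters using those gradients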
