10 | 10 |
11 | 11 | #========================== Table of Contents ==========================#
12 | 12 | # 1. Basic autograd example 1 (Line 21 to 36)
13 |    | -# 2. Basic autograd example 2 (Line 39 to 80)
14 |    | -# 3. Loading data from numpy (Line 83 to 86)
15 |    | -# 4. Implementing the input pipline (Line 90 to 117)
16 |    | -# 5. Input pipline for custom dataset (Line 119 to 139)
17 |    | -# 6. Using pretrained model (Line142 to 156)
18 |    | -# 7. Save and load model (Line 159 to L161)
   | 13 | +# 2. Basic autograd example 2 (Line 39 to 76)
   | 14 | +# 3. Loading data from numpy (Line 79 to 82)
   | 15 | +# 4. Implementing the input pipline (Line 86 to 113)
   | 16 | +# 5. Input pipline for custom dataset (Line 115 to 135)
   | 17 | +# 6. Using pretrained model (Line 138 to 152)
   | 18 | +# 7. Save and load model (Line 155 to L157)
19 | 19 |
20 | 20 |
21 | 21 | #======================= Basic autograd example 1 =======================#
25 | 25 | b = Variable(torch.Tensor([3]), requires_grad=True)
26 | 26 |
27 | 27 | # Build a computational graph.
28 |    | -y = w * x + b # y = 2 * x + 3
   | 28 | +y = w * x + b # y = 2 * x + 3
29 | 29 |
30 | 30 | # Compute gradients
31 | 31 | y.backward()
32 | 32 |
33 | 33 | # Print out the gradients
34 |    | -print(x.grad) # x.grad = 2
35 |    | -print(w.grad) # w.grad = 1
36 |    | -print(b.grad) # b.grad = 1
   | 34 | +print(x.grad) # x.grad = 2
   | 35 | +print(w.grad) # w.grad = 1
   | 36 | +print(b.grad) # b.grad = 1
37 | 37 |
38 | 38 |
39 | 39 | #======================== Basic autograd example 2 =======================#
40 | 40 | # Create tensors.
41 | 41 | x = Variable(torch.randn(5, 3))
42 | 42 | y = Variable(torch.randn(5, 2))
43 |    | -print ('x: ', x)
44 |    | -print ('y: ', y)
45 | 43 |
46 | 44 | # Build a linear layer.
47 | 45 | linear = nn.Linear(3, 2)
54 | 52 |
55 | 53 | # Forward propagation.
56 | 54 | pred = linear(x)
57 |    | -print('pred: ', pred)
58 | 55 |
59 | 56 | # Compute loss.
60 | 57 | loss = criterion(pred, y)
69 | 66 |
70 | 67 | # 1-step Optimization (gradient descent).
71 | 68 | optimizer.step()
72 |    | -print ('Optimized..!')
73 | 69 |
74 | 70 | # You can also do optimization at the low level as shown below.
75 | 71 | # linear.weight.data.sub_(0.01 * linear.weight.grad.data)
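For context on the example 1 hunk above: the tutorial builds y = w * x + b, calls backward(), and prints the gradients. The diff omits the collapsed lines that create x and w, so the snippet below is a minimal self-contained sketch of the same idea rather than the file's exact code; the values x = 1 and w = 2 are assumptions inferred from the comments shown in the diff (y = 2 * x + 3, x.grad = 2, w.grad = 1).

import torch
from torch.autograd import Variable

# x = 1 and w = 2 are assumed values, chosen to match the comments in the diff.
x = Variable(torch.Tensor([1]), requires_grad=True)
w = Variable(torch.Tensor([2]), requires_grad=True)
b = Variable(torch.Tensor([3]), requires_grad=True)

# Build the computational graph: y = w * x + b.
y = w * x + b

# Backpropagate; .grad is populated on each leaf variable.
y.backward()

print(x.grad)    # dy/dx = w = 2
print(w.grad)    # dy/dw = x = 1
print(b.grad)    # dy/db = 1

On PyTorch 0.4 and later the Variable wrapper is optional; torch.tensor([1.], requires_grad=True) behaves the same way.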
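The diff ends with a comment pointing at doing the parameter update "at the low level" instead of through the optimizer. The following is a rough, self-contained sketch of example 2's flow; the criterion and optimizer definitions sit in the collapsed lines of this diff, so nn.MSELoss() and SGD with lr=0.01 below are assumptions (the 0.01 factor is taken from the commented low-level update line).

import torch
import torch.nn as nn
from torch.autograd import Variable

x = Variable(torch.randn(5, 3))
y = Variable(torch.randn(5, 2))

linear = nn.Linear(3, 2)

# Assumed stand-ins for the collapsed lines: MSE loss and plain SGD with lr=0.01.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)

# Forward pass, loss, backward pass.
pred = linear(x)
loss = criterion(pred, y)
optimizer.zero_grad()
loss.backward()

# One optimizer step updates the parameters in place.
optimizer.step()

# For plain SGD this step is equivalent to the low-level update the file
# mentions, applied to each parameter: p <- p - lr * p.grad
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)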