from torch.autograd import Variable

#========================== Table of Contents ==========================#
# 1. Basic autograd example 1               (Line 21 to 36)
# 2. Basic autograd example 2               (Line 39 to 80)
# 3. Loading data from numpy                (Line 83 to 86)
# 4. Implementing the input pipeline        (Line 90 to 117)
# 5. Input pipeline for custom dataset      (Line 119 to 139)
# 6. Using pretrained model                 (Line 142 to 156)
# 7. Save and load model                    (Line 159 to 161)

#======================= Basic autograd example 1 =======================#
# Create tensors.
x = Variable(torch.Tensor([1]), requires_grad=True)
w = Variable(torch.Tensor([2]), requires_grad=True)
b = Variable(torch.Tensor([3]), requires_grad=True)

# Build a computational graph.
y = w * x + b    # y = 2 * x + 3

# Compute gradients.
y.backward()

# Print out the gradients.
print(x.grad)    # x.grad = 2
print(w.grad)    # w.grad = 1
print(b.grad)    # b.grad = 1
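
# A quick extra (not in the original): gradients accumulate across backward
# calls, so a second forward/backward pass doubles x.grad unless it is zeroed.
y2 = w * x + b
y2.backward()
print(x.grad)          # x.grad = 4, i.e. 2 + 2 from two backward passes.
x.grad.data.zero_()    # reset before any further backward passes.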
#======================= Basic autograd example 2 =======================#
# Create tensors.
x = Variable(torch.randn(5, 3))
y = Variable(torch.randn(5, 2))
print('x: ', x)
print('y: ', y)

# Build a linear layer.
linear = nn.Linear(3, 2)
print('w: ', linear.weight)
print('b: ', linear.bias)

# Build the loss function and optimizer.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)

# Forward propagation.
pred = linear(x)
print('pred: ', pred)

# Compute loss.
loss = criterion(pred, y)
print('loss: ', loss.data[0])

# Backpropagation.
loss.backward()

# Print out the gradients.
print('dL/dw: ', linear.weight.grad)
print('dL/db: ', linear.bias.grad)

# 1-step optimization (gradient descent).
optimizer.step()
print('Optimized..!')

# You can also do the optimization at the low level as shown below.
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)

# Print out the loss after one optimization step.
pred = linear(x)    # recompute predictions with the updated weights
loss = criterion(pred, y)
print('loss after 1 step optimization: ', loss.data[0])

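# A minimal sketch (not in the original): in practice the forward/backward/step
# sequence above runs inside a loop, clearing gradients at each iteration.
for step in range(5):
    optimizer.zero_grad()             # clear previously accumulated gradients
    loss = criterion(linear(x), y)    # forward pass
    loss.backward()                   # backward pass
    optimizer.step()                  # one gradient-descent update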

#======================== Loading data from numpy ========================#
a = np.array([[1, 2], [3, 4]])
b = torch.from_numpy(a)
print(b)
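
# Side note (not in the original): the conversion also works in reverse, and
# from_numpy shares memory with the source array instead of copying it.
c = b.numpy()    # convert the torch tensor back to a numpy array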


#===================== Implementing the input pipeline =====================#
# Download and construct the dataset.
train_dataset = dsets.CIFAR10(root='../data/',
                              train=True,
                              transform=transforms.ToTensor(),
                              download=True)
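
# A sketch (not in the original): transform also accepts a composition of
# preprocessing steps, applied in order to every image, e.g.:
augment = transforms.Compose([transforms.RandomHorizontalFlip(),
                              transforms.ToTensor()])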

# Select one data pair (read data from disk).
image, label = train_dataset[0]
print(image.size())
print(label)

# Data loader (this provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=100,
                                           shuffle=True,
                                           num_workers=2)

# When iteration starts, queues and threads start to load the dataset from files.
data_iter = iter(train_loader)

# Mini-batch images and labels.
images, labels = next(data_iter)

# Actual usage of the data loader is as below.
for images, labels in train_loader:
    # Your training code will be written here.
    pass
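
# A sketch (not in the original): with the Variable API, each mini-batch is
# wrapped in Variable before being fed to a model.
for images, labels in train_loader:
    images = Variable(images)    # (100, 3, 32, 32) for CIFAR-10
    labels = Variable(labels)    # (100,)
    break    # one batch is enough for this demonstration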

#===================== Input pipeline for custom dataset =====================#
# You should build your custom dataset as below.
class CustomDataset(data.Dataset):
    def __init__(self):
        # TODO
        # 1. Initialize file paths or a list of file names.
        pass
    def __getitem__(self, index):
        # TODO
        # 1. Read one data sample from file (e.g. using numpy.fromfile, PIL.Image.open).
        # 2. Return a data pair (e.g. image and label).
        pass
    def __len__(self):
        # You should change 0 to the total size of your dataset.
        return 0

# Then, you can just use torch's prebuilt data loader.
custom_dataset = CustomDataset()
train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                           batch_size=100,
                                           shuffle=True,
                                           num_workers=2)
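
# A concrete toy instance of the pattern above (not in the original; the class
# name and data are made up for illustration).
class ToyDataset(data.Dataset):
    def __init__(self):
        # Ten random feature vectors paired with alternating binary labels.
        self.pairs = [(torch.randn(3), i % 2) for i in range(10)]
    def __getitem__(self, index):
        return self.pairs[index]
    def __len__(self):
        return len(self.pairs)

toy_loader = torch.utils.data.DataLoader(dataset=ToyDataset(), batch_size=5)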


#========================== Using pretrained model ==========================#
# Download and load the pretrained resnet.
resnet = torchvision.models.resnet18(pretrained=True)

# If you want to finetune only the top layer of the model, freeze the rest.
for param in resnet.parameters():
    param.requires_grad = False

# Replace the top layer for finetuning.
resnet.fc = nn.Linear(resnet.fc.in_features, 100)    # 100 is just an example.

# For test.
images = Variable(torch.randn(10, 3, 256, 256))
outputs = resnet(images)
print(outputs.size())    # (10, 100)
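
# A sketch (not in the original): since everything except fc is frozen, only
# the new layer's parameters need to be handed to the optimizer.
optimizer = torch.optim.SGD(resnet.fc.parameters(), lr=0.001)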


#============================ Save and load model ============================#
torch.save(resnet, 'model.pkl')
model = torch.load('model.pkl')
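
# An alternative (not in the original): save and load only the parameters,
# which is lighter and more portable than pickling the whole model object.
torch.save(resnet.state_dict(), 'params.pkl')
resnet.load_state_dict(torch.load('params.pkl'))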