diff --git a/01-Linear Regression/Linear_Regression.py b/01-Linear Regression/Linear_Regression.py
index 3920aa8..c2488ef 100644
--- a/01-Linear Regression/Linear_Regression.py
+++ b/01-Linear Regression/Linear_Regression.py
@@ -1,29 +1,27 @@
-__author__ = 'SherlockLiao'
+# encoding: utf-8
+"""
+@author: liaoxingyu
+@contact: sherlockliao01@gmail.com
+"""
+import matplotlib.pyplot as plt
 import torch
-from torch import nn, optim
-from torch.autograd import Variable
-import numpy as np
-import matplotlib.pyplot as plt
+from torch import nn
 
-x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
+x_train = torch.tensor([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                     [9.779], [6.182], [7.59], [2.167], [7.042],
-                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
+                    [10.791], [5.313], [7.997], [3.1]], dtype=torch.float32)
 
-y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
+y_train = torch.tensor([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                     [3.366], [2.596], [2.53], [1.221], [2.827],
-                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)
-
-
-x_train = torch.from_numpy(x_train)
-
-y_train = torch.from_numpy(y_train)
+                    [3.465], [1.65], [2.904], [1.3]], dtype=torch.float32)
 
 
 # Linear Regression Model
-class LinearRegression(nn.Module):
+class linearRegression(nn.Module):
     def __init__(self):
-        super(LinearRegression, self).__init__()
+        super(linearRegression, self).__init__()
         self.linear = nn.Linear(1, 1)  # input and output is 1 dimension
 
     def forward(self, x):
@@ -31,16 +29,16 @@ def forward(self, x):
         return out
 
 
-model = LinearRegression()
+model = linearRegression()
 # define the loss function and the optimizer
 criterion = nn.MSELoss()
-optimizer = optim.SGD(model.parameters(), lr=1e-4)
+optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
 
 # start training
 num_epochs = 1000
 for epoch in range(num_epochs):
-    inputs = Variable(x_train)
-    target = Variable(y_train)
+    inputs = x_train
+    target = y_train
 
     # forward
     out = model(inputs)
@@ -51,12 +49,14 @@ def forward(self, x):
     optimizer.step()
 
     if (epoch+1) % 20 == 0:
-        print('Epoch[{}/{}], loss: {:.6f}'
-              .format(epoch+1, num_epochs, loss.data[0]))
+        print(f'Epoch[{epoch+1}/{num_epochs}], loss: {loss.item():.6f}')
 
 model.eval()
-predict = model(Variable(x_train))
+with torch.no_grad():
+    predict = model(x_train)
 predict = predict.data.numpy()
+
+fig = plt.figure(figsize=(10, 5))
 plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data')
 plt.plot(x_train.numpy(), predict, label='Fitting Line')
 # show the legend
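Note on the pattern this file now follows: since PyTorch 0.4, tensors carry autograd state themselves, so the `Variable` wrapper is redundant and inference belongs under `torch.no_grad()`. A minimal, self-contained sketch of the idiom (illustrative shapes, not code from this repo):

```python
import torch
from torch import nn

model = nn.Linear(1, 1)
x = torch.randn(8, 1)    # a plain tensor; no Variable wrapper needed

model.eval()
with torch.no_grad():    # disables gradient tracking during inference
    y = model(x)
print(y.shape)           # torch.Size([8, 1])
```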
diff --git a/02-Logistic Regression/Logistic_Regression.py b/02-Logistic Regression/Logistic_Regression.py
index cd1682b..6c58eb5 100644
--- a/02-Logistic Regression/Logistic_Regression.py
+++ b/02-Logistic Regression/Logistic_Regression.py
@@ -1,83 +1,80 @@
-__author__ = 'SherlockLiao'
+# encoding: utf-8
+"""
+@author: liaoxingyu
+@contact: sherlockliao01@gmail.com
+"""
+
+
+import time
 import torch
-from torch import nn, optim
 import torch.nn.functional as F
-from torch.autograd import Variable
+from torch import nn
 from torch.utils.data import DataLoader
-from torchvision import transforms
-from torchvision import datasets
-import time
+from torchvision import datasets, transforms
+
 
 # define hyperparameters
-batch_size = 32
+batch_size = 64
 learning_rate = 1e-3
-num_epoches = 100
+num_epochs = 100
 
 # download the training dataset
-train_dataset = datasets.MNIST(
-    root='./data', train=True, transform=transforms.ToTensor(), download=True)
+train_dataset = datasets.FashionMNIST(
+    root='../datasets', train=True, transform=transforms.ToTensor(), download=True)
 
-test_dataset = datasets.MNIST(
-    root='./data', train=False, transform=transforms.ToTensor())
+test_dataset = datasets.FashionMNIST(
+    root='../datasets', train=False, transform=transforms.ToTensor())
 
 train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
-
 
 # define the Logistic Regression model
-class Logstic_Regression(nn.Module):
+class Logistic_Regression(nn.Module):
     def __init__(self, in_dim, n_class):
-        super(Logstic_Regression, self).__init__()
-        self.logstic = nn.Linear(in_dim, n_class)
+        super(Logistic_Regression, self).__init__()
+        self.logistic = nn.Linear(in_dim, n_class)
 
     def forward(self, x):
-        out = self.logstic(x)
+        out = self.logistic(x)
         return out
 
-model = Logstic_Regression(28 * 28, 10)  # the image size is 28x28
+model = Logistic_Regression(28 * 28, 10)  # the image size is 28x28
 use_gpu = torch.cuda.is_available()  # check whether GPU acceleration is available
 if use_gpu:
     model = model.cuda()
 
 # define the loss function and the optimizer
 criterion = nn.CrossEntropyLoss()
-optimizer = optim.SGD(model.parameters(), lr=learning_rate)
+optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
 
 # start training
-for epoch in range(num_epoches):
+for epoch in range(num_epochs):
     print('*' * 10)
-    print('epoch {}'.format(epoch + 1))
+    print(f'epoch {epoch+1}')
     since = time.time()
     running_loss = 0.0
     running_acc = 0.0
+    model.train()
     for i, data in enumerate(train_loader, 1):
         img, label = data
         img = img.view(img.size(0), -1)  # flatten the image (28x28 -> 784)
         if use_gpu:
-            img = Variable(img).cuda()
-            label = Variable(label).cuda()
-        else:
-            img = Variable(img)
-            label = Variable(label)
+            img = img.cuda()
+            label = label.cuda()
         # forward pass
         out = model(img)
         loss = criterion(out, label)
-        running_loss += loss.data[0] * label.size(0)
+        running_loss += loss.item()
         _, pred = torch.max(out, 1)
-        num_correct = (pred == label).sum()
-        running_acc += num_correct.data[0]
+        running_acc += (pred == label).float().mean()
         # backward pass
         optimizer.zero_grad()
         loss.backward()
         optimizer.step()
 
         if i % 300 == 0:
-            print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
-                epoch + 1, num_epoches, running_loss / (batch_size * i),
-                running_acc / (batch_size * i)))
-    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
-        epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
-            train_dataset))))
+            print(f'[{epoch+1}/{num_epochs}] Loss: {running_loss/i:.6f}, Acc: {running_acc/i:.6f}')
+    print(f'Finish {epoch+1} epoch, Loss: {running_loss/i:.6f}, Acc: {running_acc/i:.6f}')
     model.eval()
     eval_loss = 0.
     eval_acc = 0.
@@ -85,21 +82,16 @@
         img, label = data
         img = img.view(img.size(0), -1)
         if use_gpu:
-            img = Variable(img, volatile=True).cuda()
-            label = Variable(label, volatile=True).cuda()
-        else:
-            img = Variable(img, volatile=True)
-            label = Variable(label, volatile=True)
-        out = model(img)
-        loss = criterion(out, label)
-        eval_loss += loss.data[0] * label.size(0)
+            img = img.cuda()
+            label = label.cuda()
+        with torch.no_grad():
+            out = model(img)
+            loss = criterion(out, label)
+        eval_loss += loss.item()
         _, pred = torch.max(out, 1)
-        num_correct = (pred == label).sum()
-        eval_acc += num_correct.data[0]
-    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
-        test_dataset)), eval_acc / (len(test_dataset))))
-    print('Time:{:.1f} s'.format(time.time() - since))
-    print()
+        eval_acc += (pred == label).float().mean()
+    print(f'Test Loss: {eval_loss/len(test_loader):.6f}, Acc: {eval_acc/len(test_loader):.6f}')
+    print(f'Time:{(time.time()-since):.1f} s')
 
 # save the model
-torch.save(model.state_dict(), './logstic.pth')
+torch.save(model.state_dict(), './logistic.pth')
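One caveat on the metric change above: `(pred == label).float().mean()` averages per-batch accuracies, which slightly mis-weights a smaller final batch. If exact accuracy matters, counting correct predictions is the safer pattern; a sketch using the script's own names (`model`, `test_loader`, `use_gpu` are assumed from the file above):

```python
correct, total = 0, 0
model.eval()
for img, label in test_loader:
    img = img.view(img.size(0), -1)
    if use_gpu:
        img, label = img.cuda(), label.cuda()
    with torch.no_grad():
        out = model(img)
    correct += (out.argmax(dim=1) == label).sum().item()  # per-sample count
    total += label.size(0)
print(f'Acc: {correct / total:.6f}')  # exact accuracy over the whole test set
```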
diff --git a/03-Neural Network/neural_network.py b/03-Neural Network/neural_network.py
index f411f6b..f728a0e 100644
--- a/03-Neural Network/neural_network.py
+++ b/03-Neural Network/neural_network.py
@@ -1,35 +1,42 @@
-__author__ = 'SherlockLiao'
+"""
+@author: liaoxingyu
+@contact: sherlockliao01@gmail.com
+"""
 
 import torch
-from torch import nn, optim
-
-from torch.autograd import Variable
+from torch import nn
 from torch.utils.data import DataLoader
-from torchvision import transforms
-from torchvision import datasets
+from torchvision import datasets, transforms
 
-batch_size = 32
+batch_size = 64
 learning_rate = 1e-2
-num_epoches = 50
+num_epochs = 50
+use_gpu = torch.cuda.is_available()
 
 # download the training dataset
-train_dataset = datasets.MNIST(
-    root='./data', train=True, transform=transforms.ToTensor(), download=True)
+train_dataset = datasets.FashionMNIST(
+    root='../datasets', train=True, transform=transforms.ToTensor(), download=True)
 
-test_dataset = datasets.MNIST(
-    root='./data', train=False, transform=transforms.ToTensor())
+test_dataset = datasets.FashionMNIST(
+    root='../datasets', train=False, transform=transforms.ToTensor())
 
 train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
 
 
 # define a simple feed-forward neural network
-class Neuralnetwork(nn.Module):
+class neuralNetwork(nn.Module):
     def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
-        super(Neuralnetwork, self).__init__()
-        self.layer1 = nn.Linear(in_dim, n_hidden_1)
-        self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)
-        self.layer3 = nn.Linear(n_hidden_2, out_dim)
+        super(neuralNetwork, self).__init__()
+        self.layer1 = nn.Sequential(
+            nn.Linear(in_dim, n_hidden_1),
+            nn.ReLU(True))
+        self.layer2 = nn.Sequential(
+            nn.Linear(n_hidden_1, n_hidden_2),
+            nn.ReLU(True))
+        self.layer3 = nn.Linear(n_hidden_2, out_dim)  # no ReLU on the logits fed to CrossEntropyLoss
 
     def forward(self, x):
         x = self.layer1(x)
@@ -37,68 +44,54 @@ def forward(self, x):
         x = self.layer2(x)
         x = self.layer3(x)
         return x
 
-
-model = Neuralnetwork(28 * 28, 300, 100, 10)
-if torch.cuda.is_available():
+model = neuralNetwork(28 * 28, 300, 100, 10)
+if use_gpu:
     model = model.cuda()
 
 criterion = nn.CrossEntropyLoss()
-optimizer = optim.SGD(model.parameters(), lr=learning_rate)
+optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
 
-for epoch in range(num_epoches):
-    print('epoch {}'.format(epoch + 1))
+for epoch in range(num_epochs):
     print('*' * 10)
+    print(f'epoch {epoch+1}')
     running_loss = 0.0
     running_acc = 0.0
     for i, data in enumerate(train_loader, 1):
         img, label = data
         img = img.view(img.size(0), -1)
-        if torch.cuda.is_available():
-            img = Variable(img).cuda()
-            label = Variable(label).cuda()
-        else:
-            img = Variable(img)
-            label = Variable(label)
+        if use_gpu:
+            img = img.cuda()
+            label = label.cuda()
         # forward pass
         out = model(img)
         loss = criterion(out, label)
-        running_loss += loss.data[0] * label.size(0)
+        running_loss += loss.item()
         _, pred = torch.max(out, 1)
-        num_correct = (pred == label).sum()
-        running_acc += num_correct.data[0]
+        running_acc += (pred == label).float().mean()
         # backward pass
         optimizer.zero_grad()
         loss.backward()
         optimizer.step()
 
         if i % 300 == 0:
-            print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
-                epoch + 1, num_epoches, running_loss / (batch_size * i),
-                running_acc / (batch_size * i)))
-    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
-        epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
-            train_dataset))))
+            print(f'[{epoch+1}/{num_epochs}] Loss: {running_loss/i:.6f}, Acc: {running_acc/i:.6f}')
+    print(f'Finish {epoch+1} epoch, Loss: {running_loss/i:.6f}, Acc: {running_acc/i:.6f}')
     model.eval()
     eval_loss = 0.
     eval_acc = 0.
     for data in test_loader:
         img, label = data
         img = img.view(img.size(0), -1)
-        if torch.cuda.is_available():
-            img = Variable(img, volatile=True).cuda()
-            label = Variable(label, volatile=True).cuda()
-        else:
-            img = Variable(img, volatile=True)
-            label = Variable(label, volatile=True)
-        out = model(img)
-        loss = criterion(out, label)
-        eval_loss += loss.data[0] * label.size(0)
+        if use_gpu:
+            img = img.cuda()
+            label = label.cuda()
+        with torch.no_grad():
+            out = model(img)
+            loss = criterion(out, label)
+        eval_loss += loss.item()
         _, pred = torch.max(out, 1)
-        num_correct = (pred == label).sum()
-        eval_acc += num_correct.data[0]
-    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
-        test_dataset)), eval_acc / (len(test_dataset))))
-    print()
+        eval_acc += (pred == label).float().mean()
+    print(f'Test Loss: {eval_loss/len(test_loader):.6f}, Acc: {eval_acc/len(test_loader):.6f}\n')
 
 # save the model
 torch.save(model.state_dict(), './neural_network.pth')
diff --git a/08-AutoEncoder/conv_autoencoder.py b/08-AutoEncoder/conv_autoencoder.py
index 7a211da..144f3be 100644
--- a/08-AutoEncoder/conv_autoencoder.py
+++ b/08-AutoEncoder/conv_autoencoder.py
@@ -27,7 +27,7 @@ def to_img(x):
 
 img_transform = transforms.Compose([
     transforms.ToTensor(),
-    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+    transforms.Normalize([0.5], [0.5])
 ])
 
 dataset = MNIST('./data', transform=img_transform)
@@ -66,6 +66,7 @@ def forward(self, x):
                              weight_decay=1e-5)
 
 for epoch in range(num_epochs):
+    total_loss = 0
     for data in dataloader:
         img, _ = data
         img = Variable(img).cuda()
@@ -76,9 +77,10 @@ def forward(self, x):
         optimizer.zero_grad()
         loss.backward()
         optimizer.step()
+        total_loss += loss.item()
     # ===================log========================
     print('epoch [{}/{}], loss:{:.4f}'
-          .format(epoch+1, num_epochs, loss.data[0]))
+          .format(epoch+1, num_epochs, total_loss / len(dataloader)))
     if epoch % 10 == 0:
         pic = to_img(output.cpu().data)
         save_image(pic, './dc_img/image_{}.png'.format(epoch))
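A note on the two Normalize changes (here and in simple_autoencoder.py below): MNIST tensors have a single channel, so `Normalize` needs a one-element mean and std sequence; a bare `(0.5)` is just a parenthesized float, not a tuple, and would be rejected. A minimal sketch of the corrected transform, as applied in the hunk above:

```python
from torchvision import transforms

# one mean/std entry per channel; MNIST images have a single channel
img_transform = transforms.Compose([
    transforms.ToTensor(),               # [0, 255] uint8 -> [0.0, 1.0] float
    transforms.Normalize([0.5], [0.5]),  # maps [0, 1] to [-1, 1]
])
```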
diff --git a/08-AutoEncoder/simple_autoencoder.py b/08-AutoEncoder/simple_autoencoder.py
index 7129a54..a98efd6 100644
--- a/08-AutoEncoder/simple_autoencoder.py
+++ b/08-AutoEncoder/simple_autoencoder.py
@@ -28,7 +28,7 @@ def to_img(x):
 
 img_transform = transforms.Compose([
     transforms.ToTensor(),
-    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+    transforms.Normalize([0.5], [0.5])
 ])
 
 dataset = MNIST('./data', transform=img_transform)
diff --git a/11-backward/backward.py b/11-backward/backward.py
index 5329269..f9493d6 100644
--- a/11-backward/backward.py
+++ b/11-backward/backward.py
@@ -50,10 +50,10 @@ z = t.mm(x, y)
 
 jacobian = t.zeros((2, 2))
 
-z.backward(t.FloatTensor([[1, 0]]), retain_variables=True)  # dz1/dx1, dz2/dx1
+z.backward(t.FloatTensor([[1, 0]]), retain_graph=True)  # dz1/dx1, dz1/dx2
 jacobian[:, 0] = x.grad.data
 x.grad.data.zero_()
-z.backward(t.FloatTensor([[0, 1]]))  # dz1/dx2, dz2/dx2
+z.backward(t.FloatTensor([[0, 1]]))  # dz2/dx1, dz2/dx2
 jacobian[:, 1] = x.grad.data
 print('=========jacobian========')
 print('x')
@@ -63,4 +63,4 @@
 print('compute result')
 print(z.data)
 print('jacobian matrix is')
-print(jacobian)
\ No newline at end of file
+print(jacobian)
diff --git a/README.md b/README.md
index 777c2ec..2230b06 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,6 @@
 # pytorch-beginner
-toy code for pytorch beginner
+Toy project for PyTorch beginners, with the simplest possible code.
+
+## Requirements
+- python 3.7
+- pytorch 1.0.0+
\ No newline at end of file
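One last note, on 11-backward/backward.py: with the pytorch 1.0.0+ requirement above, the keyword must be `retain_graph` (the old `retain_variables` name no longer works). On newer releases (>= 1.5, above this README's minimum), `torch.autograd.functional.jacobian` replaces the manual row-by-row backward loop entirely. A sketch with assumed example values, since the script's `x` and `y` are defined outside the hunk:

```python
import torch

y = torch.tensor([[-1., 2.], [3., 0.]])  # assumed example values
x = torch.tensor([2., 1.])

# J[j, i] = dz_j / dx_i -- the transpose of the matrix the script builds,
# which stores dz_j / dx in column j instead
J = torch.autograd.functional.jacobian(lambda x: x @ y, x)
print(J)
```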