From 66479b9cb69faabf5b38159072d49d3218a93015 Mon Sep 17 00:00:00 2001
From: liao xingyu
Date: Fri, 12 Jan 2018 16:33:59 +0800
Subject: [PATCH 01/11] Update README.md

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 777c2ec..1ed7d56 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,4 @@
 # pytorch-beginner
 toy code for pytorch beginner
+
+**Welcome to visit this [site](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch) for a more detailed Chinese PyTorch tutorial.**

From c8a5d8a7244f8696e33df4ea8e66f4cdfd51fabb Mon Sep 17 00:00:00 2001
From: LiaoXingyu
Date: Tue, 27 Feb 2018 15:55:00 +0800
Subject: [PATCH 02/11] fix bug

---
 11-backward/backward.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/11-backward/backward.py b/11-backward/backward.py
index 5329269..f9493d6 100644
--- a/11-backward/backward.py
+++ b/11-backward/backward.py
@@ -50,10 +50,10 @@ z = t.mm(x, y)
 
 jacobian = t.zeros((2, 2))
 
-z.backward(t.FloatTensor([[1, 0]]), retain_variables=True)  # dz1/dx1, dz2/dx1
+z.backward(t.FloatTensor([[1, 0]]), retain_variables=True)  # dz1/dx1, dz1/dx2
 jacobian[:, 0] = x.grad.data
 x.grad.data.zero_()
-z.backward(t.FloatTensor([[0, 1]]))  # dz1/dx2, dz2/dx2
+z.backward(t.FloatTensor([[0, 1]]))  # dz2/dx1, dz2/dx2
 jacobian[:, 1] = x.grad.data
 print('=========jacobian========')
 print('x')
@@ -63,4 +63,4 @@ print('compute result')
 print(z.data)
 print('jacobian matrix is')
-print(jacobian)
\ No newline at end of file
+print(jacobian)

From 43f318e0ef864c572b5e0a2e164bec60f4548727 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=EC=9D=B4=EC=9E=AC=EC=9C=B5=23=E6=9D=8E=E8=BD=BD=E9=9A=86?=
Date: Thu, 23 May 2019 15:49:11 +0900
Subject: [PATCH 03/11] Update Logistic_Regression.py

fix minor error (function name)
---
 02-Logistic Regression/Logistic_Regression.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/02-Logistic Regression/Logistic_Regression.py b/02-Logistic Regression/Logistic_Regression.py
index cd1682b..adbba7d 100644
--- a/02-Logistic Regression/Logistic_Regression.py
+++ b/02-Logistic Regression/Logistic_Regression.py
@@ -25,17 +25,17 @@
 
 # define the Logistic Regression model
-class Logstic_Regression(nn.Module):
+class Logistic_Regression(nn.Module):
     def __init__(self, in_dim, n_class):
-        super(Logstic_Regression, self).__init__()
-        self.logstic = nn.Linear(in_dim, n_class)
+        super(Logistic_Regression, self).__init__()
+        self.logistic = nn.Linear(in_dim, n_class)
 
     def forward(self, x):
-        out = self.logstic(x)
+        out = self.logistic(x)
         return out
 
-model = Logstic_Regression(28 * 28, 10)  # image size is 28x28
+model = Logistic_Regression(28 * 28, 10)  # image size is 28x28
 use_gpu = torch.cuda.is_available()  # check if GPU acceleration is available
 if use_gpu:
     model = model.cuda()

From ebdac001eda8bb260efb445d1fed236b78bfa3ef Mon Sep 17 00:00:00 2001
From: user01
Date: Sun, 23 Jun 2019 17:11:40 +0800
Subject: [PATCH 04/11] test git

---
 03-Neural Network/neural_network.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/03-Neural Network/neural_network.py b/03-Neural Network/neural_network.py
index f411f6b..9cc98c1 100644
--- a/03-Neural Network/neural_network.py
+++ b/03-Neural Network/neural_network.py
@@ -1,9 +1,11 @@
-__author__ = 'SherlockLiao'
+"""
+@author: liaoxingyu
+@contact: sherlockliao01@gmail.com
+"""
 
 import torch
 from torch import nn, optim
-from torch.autograd import Variable
 from torch.utils.data import DataLoader
 from torchvision import transforms
 from torchvision import datasets
From 5e7cf32cf99f70de92bfa6aaaa277e1457f240c8 Mon Sep 17 00:00:00 2001
From: liaoxingyu
Date: Sun, 23 Jun 2019 17:21:44 +0800
Subject: [PATCH 05/11] test git

---
 03-Neural Network/neural_network.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/03-Neural Network/neural_network.py b/03-Neural Network/neural_network.py
index 9cc98c1..f38a519 100644
--- a/03-Neural Network/neural_network.py
+++ b/03-Neural Network/neural_network.py
@@ -29,9 +29,15 @@ class Neuralnetwork(nn.Module):
     def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
         super(Neuralnetwork, self).__init__()
-        self.layer1 = nn.Linear(in_dim, n_hidden_1)
-        self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)
-        self.layer3 = nn.Linear(n_hidden_2, out_dim)
+        self.layer1 = nn.Sequential(
+            nn.Linear(in_dim, n_hidden_1),
+            nn.ReLU(True))
+        self.layer2 = nn.Sequential(
+            nn.Linear(n_hidden_1, n_hidden_2),
+            nn.ReLU(True))
+        self.layer3 = nn.Sequential(
+            nn.Linear(n_hidden_2, out_dim),
+            nn.ReLU(True))
 
     def forward(self, x):
         x = self.layer1(x)

From 996a53b3b29a52bad69cd8348395dc2a40a685d4 Mon Sep 17 00:00:00 2001
From: liaoxingyu
Date: Thu, 15 Aug 2019 08:50:58 +0800
Subject: [PATCH 06/11] Update to pytorch 1.0

Finish linear regression
Finish logistic regression
Finish neural network
---
 01-Linear Regression/Linear_Regression.py     | 32 ++++---
 02-Logistic Regression/Logistic_Regression.py | 87 +++++++++----------
 03-Neural Network/neural_network.py           | 79 +++++++----------
 README.md                                     |  6 +-
 datasets                                      |  1 +
 5 files changed, 95 insertions(+), 110 deletions(-)
 create mode 120000 datasets

diff --git a/01-Linear Regression/Linear_Regression.py b/01-Linear Regression/Linear_Regression.py
index 3920aa8..cfd9dec 100644
--- a/01-Linear Regression/Linear_Regression.py
+++ b/01-Linear Regression/Linear_Regression.py
@@ -1,10 +1,14 @@
-__author__ = 'SherlockLiao'
+# encoding: utf-8
+"""
+@author: liaoxingyu
+@contact: sherlockliao01@gmail.com
+"""
 
+import matplotlib.pyplot as plt
+import numpy as np
 import torch
-from torch import nn, optim
+from torch import nn
 from torch.autograd import Variable
-import numpy as np
-import matplotlib.pyplot as plt
 
 x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                     [9.779], [6.182], [7.59], [2.167], [7.042],
@@ -21,9 +25,9 @@
 
 # Linear Regression Model
-class LinearRegression(nn.Module):
+class linearRegression(nn.Module):
     def __init__(self):
-        super(LinearRegression, self).__init__()
+        super(linearRegression, self).__init__()
         self.linear = nn.Linear(1, 1)  # input and output is 1 dimension
 
     def forward(self, x):
@@ -31,16 +35,16 @@ def forward(self, x):
         return out
 
-model = LinearRegression()
+model = linearRegression()
 # define the loss and the optimizer
 criterion = nn.MSELoss()
-optimizer = optim.SGD(model.parameters(), lr=1e-4)
+optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
 # start training
 num_epochs = 1000
 for epoch in range(num_epochs):
-    inputs = Variable(x_train)
-    target = Variable(y_train)
+    inputs = x_train
+    target = y_train
 
     # forward
     out = model(inputs)
@@ -51,12 +55,14 @@ def forward(self, x):
     optimizer.step()
 
     if (epoch+1) % 20 == 0:
-        print('Epoch[{}/{}], loss: {:.6f}'
-              .format(epoch+1, num_epochs, loss.data[0]))
+        print(f'Epoch[{epoch+1}/{num_epochs}], loss: {loss.item():.6f}')
 
 model.eval()
-predict = model(Variable(x_train))
+with torch.no_grad():
+    predict = model(x_train)
 predict = predict.data.numpy()
+
+fig = plt.figure(figsize=(10, 5))
 plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data')
 plt.plot(x_train.numpy(), predict, label='Fitting Line')
 # show the legend
diff --git a/02-Logistic Regression/Logistic_Regression.py b/02-Logistic Regression/Logistic_Regression.py
index cd1682b..e2c352a 100644
--- a/02-Logistic Regression/Logistic_Regression.py
+++ b/02-Logistic Regression/Logistic_Regression.py
@@ -1,83 +1,79 @@
-__author__ = 'SherlockLiao'
+# encoding: utf-8
+"""
+@author: liaoxingyu
+@contact: sherlockliao01@gmail.com
+"""
+
+
+import time
 
 import torch
-from torch import nn, optim
 import torch.nn.functional as F
-from torch.autograd import Variable
+from torch import nn
 from torch.utils.data import DataLoader
-from torchvision import transforms
-from torchvision import datasets
-import time
+from torchvision import datasets, transforms
+
 
 # define hyperparameters
-batch_size = 32
+batch_size = 64
 learning_rate = 1e-3
-num_epoches = 100
+num_epochs = 100
 
 # download the MNIST handwritten digits training set
-train_dataset = datasets.MNIST(
-    root='./data', train=True, transform=transforms.ToTensor(), download=True)
+train_dataset = datasets.FashionMNIST(
+    root='../datasets', train=True, transform=transforms.ToTensor(), download=True)
 
-test_dataset = datasets.MNIST(
-    root='./data', train=False, transform=transforms.ToTensor())
+test_dataset = datasets.FashionMNIST(
+    root='../datasets', train=False, transform=transforms.ToTensor())
 
 train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
 
-
 # define the Logistic Regression model
-class Logstic_Regression(nn.Module):
+class logsticRegression(nn.Module):
     def __init__(self, in_dim, n_class):
-        super(Logstic_Regression, self).__init__()
+        super(logsticRegression, self).__init__()
         self.logstic = nn.Linear(in_dim, n_class)
 
     def forward(self, x):
         out = self.logstic(x)
         return out
 
-
-model = Logstic_Regression(28 * 28, 10)  # image size is 28x28
+model = logsticRegression(28 * 28, 10)  # image size is 28x28
 use_gpu = torch.cuda.is_available()  # check if GPU acceleration is available
 if use_gpu:
     model = model.cuda()
 # define loss and optimizer
 criterion = nn.CrossEntropyLoss()
-optimizer = optim.SGD(model.parameters(), lr=learning_rate)
+optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
 
 # start training
-for epoch in range(num_epoches):
+for epoch in range(num_epochs):
     print('*' * 10)
-    print('epoch {}'.format(epoch + 1))
+    print(f'epoch {epoch+1}')
    since = time.time()
     running_loss = 0.0
     running_acc = 0.0
+    model.train()
     for i, data in enumerate(train_loader, 1):
         img, label = data
         img = img.view(img.size(0), -1)  # flatten the image into 28x28
         if use_gpu:
-            img = Variable(img).cuda()
-            label = Variable(label).cuda()
-        else:
-            img = Variable(img)
-            label = Variable(label)
+            img = img.cuda()
+            label = label.cuda()
         # forward pass
         out = model(img)
         loss = criterion(out, label)
-        running_loss += loss.data[0] * label.size(0)
+        running_loss += loss.item()
         _, pred = torch.max(out, 1)
-        num_correct = (pred == label).sum()
-        running_acc += num_correct.data[0]
+        running_acc += (pred == label).float().mean()
         # backward pass
         optimizer.zero_grad()
         loss.backward()
         optimizer.step()
 
         if i % 300 == 0:
-            print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
-                epoch + 1, num_epoches, running_loss / (batch_size * i),
-                running_acc / (batch_size * i)))
+            print(f'[{epoch+1}/{num_epochs}] Loss: {running_loss/i:.6f}, Acc: {running_acc/i:.6f}')
-    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
-        epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
-            train_dataset))))
+    print(f'Finish {epoch+1} epoch, Loss: {running_loss/i:.6f}, Acc: {running_acc/i:.6f}')
 
     model.eval()
     eval_loss = 0.
     eval_acc = 0.
@@ -85,21 +81,16 @@ def forward(self, x):
         img, label = data
         img = img.view(img.size(0), -1)
         if use_gpu:
-            img = Variable(img, volatile=True).cuda()
-            label = Variable(label, volatile=True).cuda()
-        else:
-            img = Variable(img, volatile=True)
-            label = Variable(label, volatile=True)
-        out = model(img)
-        loss = criterion(out, label)
-        eval_loss += loss.data[0] * label.size(0)
+            img = img.cuda()
+            label = label.cuda()
+        with torch.no_grad():
+            out = model(img)
+            loss = criterion(out, label)
+        eval_loss += loss.item()
         _, pred = torch.max(out, 1)
-        num_correct = (pred == label).sum()
-        eval_acc += num_correct.data[0]
-    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
-        test_dataset)), eval_acc / (len(test_dataset))))
-    print('Time:{:.1f} s'.format(time.time() - since))
-    print()
+        eval_acc += (pred == label).float().mean()
+    print(f'Test Loss: {eval_loss/len(test_loader):.6f}, Acc: {eval_acc/len(test_loader):.6f}')
+    print(f'Time:{(time.time()-since):.1f} s')
 
 # save the model
 torch.save(model.state_dict(), './logstic.pth')

diff --git a/03-Neural Network/neural_network.py b/03-Neural Network/neural_network.py
index f38a519..f728a0e 100644
--- a/03-Neural Network/neural_network.py
+++ b/03-Neural Network/neural_network.py
@@ -4,31 +4,30 @@
 """
 
 import torch
-from torch import nn, optim
-
+from torch import nn
 from torch.utils.data import DataLoader
-from torchvision import transforms
-from torchvision import datasets
+from torchvision import datasets, transforms
 
-batch_size = 32
+batch_size = 64
 learning_rate = 1e-2
-num_epoches = 50
+num_epochs = 50
+use_gpu = torch.cuda.is_available()
 
 # download the MNIST handwritten digits training set
-train_dataset = datasets.MNIST(
-    root='./data', train=True, transform=transforms.ToTensor(), download=True)
+train_dataset = datasets.FashionMNIST(
+    root='../datasets', train=True, transform=transforms.ToTensor(), download=True)
 
-test_dataset = datasets.MNIST(
-    root='./data', train=False, transform=transforms.ToTensor())
+test_dataset = datasets.FashionMNIST(
+    root='../datasets', train=False, transform=transforms.ToTensor())
 
 train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
 
# define a simple feedforward neural network
-class Neuralnetwork(nn.Module):
+class neuralNetwork(nn.Module):
     def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
-        super(Neuralnetwork, self).__init__()
+        super(neuralNetwork, self).__init__()
         self.layer1 = nn.Sequential(
             nn.Linear(in_dim, n_hidden_1),
             nn.ReLU(True))
@@ -45,68 +44,54 @@ def forward(self, x):
         x = self.layer3(x)
         return x
 
-
-model = Neuralnetwork(28 * 28, 300, 100, 10)
-if torch.cuda.is_available():
+model = neuralNetwork(28 * 28, 300, 100, 10)
+if use_gpu:
     model = model.cuda()
 
 criterion = nn.CrossEntropyLoss()
-optimizer = optim.SGD(model.parameters(), lr=learning_rate)
+optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
 
-for epoch in range(num_epoches):
-    print('epoch {}'.format(epoch + 1))
+for epoch in range(num_epochs):
     print('*' * 10)
+    print(f'epoch {epoch+1}')
     running_loss = 0.0
     running_acc = 0.0
     for i, data in enumerate(train_loader, 1):
         img, label = data
         img = img.view(img.size(0), -1)
-        if torch.cuda.is_available():
-            img = Variable(img).cuda()
-            label = Variable(label).cuda()
-        else:
-            img = Variable(img)
-            label = Variable(label)
+        if use_gpu:
+            img = img.cuda()
+            label = label.cuda()
 
         # forward pass
         out = model(img)
         loss = criterion(out, label)
-        running_loss += loss.data[0] * label.size(0)
+        running_loss += loss.item()
         _, pred = torch.max(out, 1)
-        num_correct = (pred == label).sum()
-        running_acc += num_correct.data[0]
+        running_acc += (pred == label).float().mean()
         # backward pass
         optimizer.zero_grad()
         loss.backward()
         optimizer.step()
 
         if i % 300 == 0:
-            print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
-                epoch + 1, num_epoches, running_loss / (batch_size * i),
-                running_acc / (batch_size * i)))
+            print(f'[{epoch+1}/{num_epochs}] Loss: {running_loss/i:.6f}, Acc: {running_acc/i:.6f}')
-    print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
-        epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
-            train_dataset))))
+    print(f'Finish {epoch+1} epoch, Loss: {running_loss/i:.6f}, Acc: {running_acc/i:.6f}')
 
     model.eval()
     eval_loss = 0.
     eval_acc = 0.
     for data in test_loader:
         img, label = data
         img = img.view(img.size(0), -1)
-        if torch.cuda.is_available():
-            img = Variable(img, volatile=True).cuda()
-            label = Variable(label, volatile=True).cuda()
-        else:
-            img = Variable(img, volatile=True)
-            label = Variable(label, volatile=True)
-        out = model(img)
-        loss = criterion(out, label)
-        eval_loss += loss.data[0] * label.size(0)
+        if use_gpu:
+            img = img.cuda()
+            label = label.cuda()
+        with torch.no_grad():
+            out = model(img)
+            loss = criterion(out, label)
+        eval_loss += loss.item()
         _, pred = torch.max(out, 1)
-        num_correct = (pred == label).sum()
-        eval_acc += num_correct.data[0]
-    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
-        test_dataset)), eval_acc / (len(test_dataset))))
-    print()
+        eval_acc += (pred == label).float().mean()
+    print(f'Test Loss: {eval_loss/len(test_loader):.6f}, Acc: {eval_acc/len(test_loader):.6f}\n')
 
 # save the model
 torch.save(model.state_dict(), './neural_network.pth')

diff --git a/README.md b/README.md
index 1ed7d56..2230b06 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,6 @@
 # pytorch-beginner
-toy code for pytorch beginner
+Toy project for PyTorch beginners with the simplest code.
 
-**Welcome to visit this [site](https://github.com/SherlockLiao/code-of-learn-deep-learning-with-pytorch) for a more detailed Chinese PyTorch tutorial.**
+## Requirements
+python 3.7
+pytorch 1.0.0+
\ No newline at end of file
diff --git a/datasets b/datasets
new file mode 120000
index 0000000..30f9f15
--- /dev/null
+++ b/datasets
@@ -0,0 +1 @@
+../datasets/
\ No newline at end of file

From 052ed0d4e3d3959e86c88737fd133ef5d0180acc Mon Sep 17 00:00:00 2001
From: liaoxingyu
Date: Sat, 9 Nov 2019 13:46:25 +0800
Subject: [PATCH 07/11] update

---
 datasets | 1 -
 1 file changed, 1 deletion(-)
 delete mode 120000 datasets

diff --git a/datasets b/datasets
deleted file mode 120000
index 30f9f15..0000000
--- a/datasets
+++ /dev/null
@@ -1 +0,0 @@
-../datasets/
\ No newline at end of file

From 1e9ccd46767feba2cc2b664eed625c6e3c9f48a1 Mon Sep 17 00:00:00 2001
From: Lei Wang <34334180+NjtechPrinceling@users.noreply.github.com>
Date: Thu, 21 May 2020 19:48:19 +0800
Subject: [PATCH 08/11] fix normalize operation in transform

---
 08-AutoEncoder/simple_autoencoder.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/08-AutoEncoder/simple_autoencoder.py b/08-AutoEncoder/simple_autoencoder.py
index 7129a54..a98efd6 100644
--- a/08-AutoEncoder/simple_autoencoder.py
+++ b/08-AutoEncoder/simple_autoencoder.py
@@ -28,7 +28,7 @@ def to_img(x):
 
 img_transform = transforms.Compose([
     transforms.ToTensor(),
-    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+    transforms.Normalize([0.5], [0.5])
 ])
 
 dataset = MNIST('./data', transform=img_transform)

From f6b3eed016f3255bb53b776455845c4ab31ab82a Mon Sep 17 00:00:00 2001
From: Fabion Kauker
Date: Mon, 10 Aug 2020 11:00:56 -0700
Subject: [PATCH 09/11] Update conv_autoencoder.py

Updated to work with ```torch==1.6.0```
---
 08-AutoEncoder/conv_autoencoder.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/08-AutoEncoder/conv_autoencoder.py b/08-AutoEncoder/conv_autoencoder.py
index 7a211da..005f9c0 100644
--- a/08-AutoEncoder/conv_autoencoder.py
+++ b/08-AutoEncoder/conv_autoencoder.py
@@ -27,7 +27,7 @@ def to_img(x):
 
 img_transform = transforms.Compose([
     transforms.ToTensor(),
-    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+    transforms.Normalize((0.5), (0.5))
 ])
 
 dataset = MNIST('./data', transform=img_transform)
@@ -64,7 +64,7 @@ def forward(self, x):
 criterion = nn.MSELoss()
 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                              weight_decay=1e-5)
-
+total_loss = 0
 for epoch in range(num_epochs):
     for data in dataloader:
         img, _ = data
@@ -77,8 +77,9 @@ def forward(self, x):
         loss.backward()
         optimizer.step()
     # ===================log========================
+    total_loss += loss.data
     print('epoch [{}/{}], loss:{:.4f}'
-          .format(epoch+1, num_epochs, loss.data[0]))
+          .format(epoch+1, num_epochs, total_loss))
     if epoch % 10 == 0:
         pic = to_img(output.cpu().data)
         save_image(pic, './dc_img/image_{}.png'.format(epoch))

From db103d5b8bda9dd7794396964f6e0636692fef63 Mon Sep 17 00:00:00 2001
From: Hyeongmin Moon
Date: Fri, 11 Feb 2022 11:36:13 +0900
Subject: [PATCH 10/11] fix log

I found that total_loss was accumulated across the whole run (100
epochs), so the log printed a running sum rather than the current
epoch's loss. Fixed this so the log prints the right loss per epoch.
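In other words, the accumulator has to be reset at the top of each
epoch. A minimal, self-contained sketch of the corrected pattern — the
nn.Linear model and random batches below are placeholders standing in
for the real convolutional autoencoder and MNIST dataloader in
08-AutoEncoder/conv_autoencoder.py:

```python
import torch
from torch import nn

# Placeholder model and data; the real script trains a conv autoencoder.
model = nn.Linear(8, 8)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
batches = [torch.randn(4, 8) for _ in range(10)]

num_epochs = 3
for epoch in range(num_epochs):
    total_loss = 0  # reset here, not once before the epoch loop
    for img in batches:
        output = model(img)
        loss = criterion(output, img)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    # prints only this epoch's accumulated loss
    print('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, num_epochs, total_loss))
```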
---
 08-AutoEncoder/conv_autoencoder.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/08-AutoEncoder/conv_autoencoder.py b/08-AutoEncoder/conv_autoencoder.py
index 005f9c0..144f3be 100644
--- a/08-AutoEncoder/conv_autoencoder.py
+++ b/08-AutoEncoder/conv_autoencoder.py
@@ -64,8 +64,9 @@ def forward(self, x):
 criterion = nn.MSELoss()
 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
                              weight_decay=1e-5)
-total_loss = 0
+
 for epoch in range(num_epochs):
+    total_loss = 0
     for data in dataloader:
         img, _ = data
         img = Variable(img).cuda()
@@ -76,8 +77,8 @@ def forward(self, x):
         optimizer.zero_grad()
         loss.backward()
         optimizer.step()
+        total_loss += loss.data
     # ===================log========================
-    total_loss += loss.data
     print('epoch [{}/{}], loss:{:.4f}'
           .format(epoch+1, num_epochs, total_loss))
     if epoch % 10 == 0:

From a57dd7ea35c9965a41f4963dee62d38884f92a65 Mon Sep 17 00:00:00 2001
From: viraj sharma
Date: Thu, 23 Oct 2025 14:53:39 +0530
Subject: [PATCH 11/11] Merge pull request #43 from
 virajsharma2000/performance-optimize-in-pytorch-tutorial

Removed unnecessary conversions of tensors in the linear regression code
---
 01-Linear Regression/Linear_Regression.py | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/01-Linear Regression/Linear_Regression.py b/01-Linear Regression/Linear_Regression.py
index cfd9dec..c2488ef 100644
--- a/01-Linear Regression/Linear_Regression.py
+++ b/01-Linear Regression/Linear_Regression.py
@@ -5,23 +5,17 @@
 """
 
 import matplotlib.pyplot as plt
-import numpy as np
 import torch
 from torch import nn
 from torch.autograd import Variable
 
-x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
-                    [9.779], [6.182], [7.59], [2.167], [7.042],
-                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
+x_train = torch.tensor([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168], [9.779], [6.182], [7.59], [2.167], [7.042],
+                        [10.791], [5.313], [7.997], [3.1]], dtype=torch.float32)
 
-y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
-                    [3.366], [2.596], [2.53], [1.221], [2.827],
-                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)
-
-
-x_train = torch.from_numpy(x_train)
-
-y_train = torch.from_numpy(y_train)
+y_train = torch.tensor([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573], [3.366], [2.596], [2.53], [1.221], [2.827],
+                        [3.465], [1.65], [2.904], [1.3]], dtype=torch.float32)
 
 # Linear Regression Model