PyTorch CNN with MNIST
lab-10_1_mnist_cnn

In [5]:
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.init
In [2]:
# check whether a GPU is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# after choosing the device, fix the seed for random values
# for reproducibility
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)
In [3]:
# CNN training hyperparameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
In [9]:
# MNIST dataset
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)

mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)
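As a quick sanity check (not part of the original notebook), you can confirm what the download produced; MNIST provides 60,000 training images and 10,000 test images, each a 28x28 grayscale digit:

# assumes the two dataset objects above have been created
print(len(mnist_train))      # 60000
print(len(mnist_test))       # 10000
image, label = mnist_train[0]
print(image.shape, label)    # torch.Size([1, 28, 28]) and an integer class label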
In [11]:
# wrap the training set in a DataLoader using the batch_size above
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True,
                                          drop_last=True)
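With batch_size = 100 the loader yields 600 batches per epoch. A small sketch (assuming the loader above) to peek at one batch and its shapes:

images, labels = next(iter(data_loader))
print(images.shape)   # torch.Size([100, 1, 28, 28]) -- ToTensor scales pixels to [0, 1]
print(labels.shape)   # torch.Size([100])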
In [13]:
# CNN Model (2 conv layers)
class CNN(torch.nn.Module):

    def __init__(self):
        super(CNN, self).__init__()
        # L1 ImgIn shape=(?, 28, 28, 1)
        #    Conv -> (?, 28, 28, 32)
        #    Pool -> (?, 14, 14, 32)
        self.layer1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # L2 ImgIn shape=(?, 14, 14, 32)
        #    Conv -> (?, 14, 14, 64)
        #    Pool -> (?, 7, 7, 64)
        self.layer2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(kernel_size=2, stride=2))
        # Final FC 7x7x64 inputs -> 10 outputs
        self.fc = torch.nn.Linear(7 * 7 * 64, 10, bias=True)
        torch.nn.init.xavier_uniform_(self.fc.weight)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)   # Flatten them for FC
        out = self.fc(out)
        return out
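The fully connected layer expects 7 * 7 * 64 = 3136 features, which is why the comments track the shape after each pooling step. A quick shape check with a dummy batch (a sketch, not part of the original lab):

dummy = torch.zeros(2, 1, 28, 28)   # (batch, channels, height, width)
out = CNN()(dummy)
print(out.shape)                    # torch.Size([2, 10]) -- one logit per digit class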
In [15]:
# instantiate the model and move it to the selected device
model = CNN().to(device)
In [16]:
# define cost/loss & optimizer
criterion = torch.nn.CrossEntropyLoss().to(device)   # Softmax is internally computed.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
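CrossEntropyLoss applies LogSoftmax internally, so the model should output raw logits and the targets should be integer class indices rather than one-hot vectors. A minimal illustration with hypothetical tensors, just to show the expected shapes:

logits = torch.randn(4, 10).to(device)           # raw scores for a batch of 4 images
targets = torch.tensor([3, 0, 7, 7]).to(device)  # integer labels in [0, 9]
print(criterion(logits, targets))                # a scalar loss tensor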
In [17]:
total_batch = len(data_loader)
print('Learning started')

for epoch in range(training_epochs):
    avg_cost = 0

    for X, Y in data_loader:   # X = image, Y = label
        X = X.to(device)
        Y = Y.to(device)

        optimizer.zero_grad()
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        cost.backward()
        optimizer.step()

        avg_cost += cost / total_batch

    print('[Epoch: {:>4}] cost = {:>.9}'.format(epoch + 1, avg_cost))

print("Learning Finished")
Learning started
[Epoch:    1] cost = 0.230441451
[Epoch:    2] cost = 0.0588737279
[Epoch:    3] cost = 0.0437557586
[Epoch:    4] cost = 0.0344327986
[Epoch:    5] cost = 0.0283598024
[Epoch:    6] cost = 0.0232713353
[Epoch:    7] cost = 0.0193091519
[Epoch:    8] cost = 0.0167574845
[Epoch:    9] cost = 0.0143606402
[Epoch:   10] cost = 0.0120671066
[Epoch:   11] cost = 0.0107910205
[Epoch:   12] cost = 0.00846360531
[Epoch:   13] cost = 0.00608973531
[Epoch:   14] cost = 0.00732062757
[Epoch:   15] cost = 0.00569137093
Learning Finished
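Since the 15 epochs above take a while, it can be worth saving the trained weights before evaluating. A minimal sketch (the filename is just an example):

torch.save(model.state_dict(), 'mnist_cnn.pth')   # hypothetical path

# later, restore into a fresh instance before inference
restored = CNN().to(device)
restored.load_state_dict(torch.load('mnist_cnn.pth', map_location=device))
restored.eval()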
In [18]:
with torch.no_grad():
    X_test = mnist_test.test_data.view(len(mnist_test), 1, 28, 28).float().to(device)
    Y_test = mnist_test.test_labels.to(device)

    prediction = model(X_test)
    correct_prediction = torch.argmax(prediction, 1) == Y_test   # 1 if equal, otherwise 0
    accuracy = correct_prediction.float().mean()
    print('Accuracy:', accuracy.item())
Accuracy: 0.9828999638557434
C:\Users\sangi\Anaconda3\envs\pytorch\lib\site-packages\torchvision\datasets\mnist.py:63: UserWarning: test_data has been renamed data
  warnings.warn("test_data has been renamed data")
C:\Users\sangi\Anaconda3\envs\pytorch\lib\site-packages\torchvision\datasets\mnist.py:53: UserWarning: test_labels has been renamed targets
  warnings.warn("test_labels has been renamed targets")
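The warnings come from the deprecated test_data / test_labels attributes; recent torchvision exposes the same tensors as data and targets. One possible alternative evaluation sketch that avoids the warnings and also scales pixels to [0, 1], as ToTensor did for the training batches (a sketch, not the original lab code):

with torch.no_grad():
    X_test = mnist_test.data.view(len(mnist_test), 1, 28, 28).float().to(device) / 255.0
    Y_test = mnist_test.targets.to(device)

    prediction = model(X_test)
    accuracy = (torch.argmax(prediction, 1) == Y_test).float().mean()
    print('Accuracy:', accuracy.item())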