Implementing a CNN (Convolutional Neural Network) in PyTorch

柔情只为你懂 · 2022-06-11 07:39

Preface

A PyTorch implementation of a CNN (convolutional neural network), using the MNIST handwritten-digit dataset as an example.

Environment

macOS

Python 3
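
The code below needs torch, torchvision, and matplotlib; scikit-learn is optional and only used for the t-SNE visualization at the end. One common way to install them (assuming pip is available; versions are not pinned here):

    pip install torch torchvision matplotlib scikit-learn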

Code

    import torch
    import torch.nn as nn
    import torch.utils.data as Data
    import torchvision
    import matplotlib.pyplot as plt

    torch.manual_seed(1)    # reproducible

    # Hyper parameters
    EPOCH = 1               # train the training data n times; to save time, we train only 1 epoch
    BATCH_SIZE = 50
    LR = 0.001              # learning rate
    DOWNLOAD_MNIST = True   # set to False if you have already downloaded the data

    # MNIST digits dataset
    train_data = torchvision.datasets.MNIST(
        root='./mnist/',
        train=True,                                     # this is the training set
        transform=torchvision.transforms.ToTensor(),    # converts a PIL.Image or numpy.ndarray to a
                                                        # torch.FloatTensor of shape (C x H x W), normalized to [0.0, 1.0]
        download=DOWNLOAD_MNIST,                        # download it if you don't have it
    )

    # plot one example
    print(train_data.data.size())      # (60000, 28, 28)
    print(train_data.targets.size())   # (60000,)
    plt.imshow(train_data.data[0].numpy(), cmap='gray')
    plt.title('%i' % train_data.targets[0])
    plt.show()

    # DataLoader for easy mini-batch return in training; each image batch has shape (50, 1, 28, 28)
    train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

    # pick 2000 test samples to speed up evaluation
    test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
    # shape from (2000, 28, 28) to (2000, 1, 28, 28), values scaled into [0, 1]
    test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:2000] / 255.
    test_y = test_data.targets[:2000]

    class CNN(nn.Module):
        def __init__(self):
            super(CNN, self).__init__()
            self.conv1 = nn.Sequential(         # input shape (1, 28, 28)
                nn.Conv2d(
                    in_channels=1,              # input channels (grayscale image)
                    out_channels=16,            # number of filters
                    kernel_size=5,              # filter size
                    stride=1,                   # filter movement/step
                    padding=2,                  # to keep the same width and height after conv2d: padding = (kernel_size - 1) / 2 when stride = 1
                ),                              # output shape (16, 28, 28)
                nn.ReLU(),                      # activation
                nn.MaxPool2d(kernel_size=2),    # take the max value in each 2x2 area, output shape (16, 14, 14)
            )
            self.conv2 = nn.Sequential(         # input shape (16, 14, 14)
                nn.Conv2d(16, 32, 5, 1, 2),     # output shape (32, 14, 14)
                nn.ReLU(),                      # activation
                nn.MaxPool2d(2),                # output shape (32, 7, 7)
            )
            self.out = nn.Linear(32 * 7 * 7, 10)    # fully connected layer, output 10 classes

        def forward(self, x):
            x = self.conv1(x)
            x = self.conv2(x)
            x = x.view(x.size(0), -1)   # flatten the conv2 output to (batch_size, 32 * 7 * 7)
            output = self.out(x)
            return output, x            # also return x for visualization

    cnn = CNN()
    print(cnn)  # net architecture

    optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)   # optimize all cnn parameters
    loss_func = nn.CrossEntropyLoss()                       # the target label is not one-hot encoded

    # the following function (plot_with_labels) is for visualization; skip it if you are not interested
    from matplotlib import cm
    try:
        from sklearn.manifold import TSNE
        HAS_SK = True
    except ImportError:
        HAS_SK = False
        print('Please install sklearn for layer visualization')

    def plot_with_labels(lowDWeights, labels):
        plt.cla()
        X, Y = lowDWeights[:, 0], lowDWeights[:, 1]
        for x, y, s in zip(X, Y, labels):
            c = cm.rainbow(int(255 * s / 9))
            plt.text(x, y, s, backgroundcolor=c, fontsize=9)
        plt.xlim(X.min(), X.max())
        plt.ylim(Y.min(), Y.max())
        plt.title('Visualize last layer')
        plt.show()
        plt.pause(0.01)

    plt.ion()

    # training and testing
    for epoch in range(EPOCH):
        for step, (b_x, b_y) in enumerate(train_loader):    # yields batch data; x is normalized by the loader
            output = cnn(b_x)[0]            # cnn output
            loss = loss_func(output, b_y)   # cross-entropy loss
            optimizer.zero_grad()           # clear gradients for this training step
            loss.backward()                 # backpropagation, compute gradients
            optimizer.step()                # apply gradients

            if step % 50 == 0:
                with torch.no_grad():       # no gradients needed for evaluation
                    test_output, last_layer = cnn(test_x)
                pred_y = torch.max(test_output, 1)[1].squeeze()
                accuracy = (pred_y == test_y).sum().item() / float(test_y.size(0))
                print('Epoch: ', epoch, '| train loss: %.4f' % loss.item(), '| test accuracy: %.2f' % accuracy)
                if HAS_SK:
                    # visualize the trained flattened layer with t-SNE
                    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
                    plot_only = 500
                    low_dim_embs = tsne.fit_transform(last_layer.numpy()[:plot_only, :])
                    labels = test_y.numpy()[:plot_only]
                    plot_with_labels(low_dim_embs, labels)
    plt.ioff()

    # print 10 predictions from the test data
    with torch.no_grad():
        test_output, _ = cnn(test_x[:10])
    pred_y = torch.max(test_output, 1)[1].numpy().squeeze()
    print(pred_y, 'prediction number')
    print(test_y[:10].numpy(), 'real number')
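
As a quick sanity check on the shape comments above: each MaxPool2d(2) halves the spatial size, so the 28x28 input becomes 14x14 after conv1 and 7x7 after conv2, which is why the fully connected layer expects 32 * 7 * 7 = 1568 inputs. A minimal sketch (assuming the CNN class defined above) that pushes one dummy image through the network:

    import torch

    net = CNN()
    dummy = torch.randn(1, 1, 28, 28)   # one fake grayscale 28x28 image
    logits, flat = net(dummy)           # forward() returns (class scores, flattened features)
    print(logits.shape)                 # torch.Size([1, 10])   -- one score per digit class
    print(flat.shape)                   # torch.Size([1, 1568]) -- 32 * 7 * 7 flattened features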
