简单分析一下主流的几种神经网络

LeNet

LeNet作为卷积神经网络中的HelloWorld，它的结构极其简单，1998年由LeCun提出。

import torch.nn as nn
import torch

class LeNet(nn.Module):
    """LeNet-style CNN for single-channel 28x28 inputs (e.g. MNIST).

    NOTE(review): the original post lost the convolution/pooling
    ``add_module`` lines; they are reconstructed here from the shape
    comments that survived (28x28 -> 14x14x6 -> 10x10x16 -> 5x5x16).
    The visible classifier has only ``fc1`` (output dim 120); the classic
    LeNet fc2/fc3 layers may also have been lost — TODO confirm against
    the original source.
    """

    def __init__(self):
        super(LeNet, self).__init__()

        layer1 = nn.Sequential()
        # 3x3 conv, pad 1 keeps spatial size: 28x28x6
        layer1.add_module('conv1', nn.Conv2d(1, 6, kernel_size=3, padding=1))
        # Pool with 2x2 average kernel+2 stride: 14x14x6
        layer1.add_module('pool1', nn.AvgPool2d(kernel_size=2, stride=2))
        self.layer1 = layer1

        layer2 = nn.Sequential()
        # Convolution with 5x5 kernel (no pad): 10x10x16
        layer2.add_module('conv2', nn.Conv2d(6, 16, kernel_size=5))
        # Pool with 2x2 average kernel+2 stride: 5x5x16
        layer2.add_module('pool2', nn.AvgPool2d(kernel_size=2, stride=2))
        self.layer2 = layer2

        layer3 = nn.Sequential()
        # 5 = ((28/2)-4)/2, so the flattened feature vector is 16*5*5
        layer3.add_module('fc1', nn.Linear(16 * 5 * 5, 120))
        self.layer3 = layer3

    def forward(self, x):
        """Run the two conv stages, flatten, and apply the fc stage.

        Args:
            x: tensor of shape (N, 1, 28, 28).
        Returns:
            Tensor of shape (N, 120).
        """
        x = self.layer1(x)
        x = self.layer2(x)
        # Flatten everything except the batch dimension.
        x = torch.flatten(x, 1)
        x = self.layer3(x)
        return x

# Smoke test: push one random 1x28x28 "image" through the untrained LeNet
# and let the output tensor echo in the notebook cell.
test_data = torch.rand(1, 1, 28, 28)
model = LeNet()
model(test_data)


tensor([[ 0.0067, -0.0431,  0.1072,  0.1275,  0.0143,  0.0865, -0.0490, -0.0936,


AlexNet

import torch.nn as nn
import torch

class AlexNet(nn.Module):
    """AlexNet classifier for 3x224x224 inputs.

    NOTE(review): the original post lost every convolution after the first
    one (the ``features`` stack was reduced to bare ReLU/MaxPool layers, so
    the channel counts could never reach the 256*5*5 the classifier
    expects). The tower is reconstructed as the classic AlexNet layout
    (64-192-384-256-256), which reproduces the logged feature size
    torch.Size([1, 256, 5, 5]) for a 224x224 input.

    Args:
        num_class: number of output classes.
    """

    def __init__(self, num_class):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            # 224 -> 54 (11x11 conv, stride 4, no padding)
            nn.Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4)),
            nn.ReLU(True),
            # 54 -> 26
            nn.MaxPool2d(kernel_size=3, stride=2),
            # 26 -> 26 (5x5 conv, pad 2)
            nn.Conv2d(64, 192, kernel_size=(5, 5), padding=2),
            nn.ReLU(True),
            # 26 -> 12
            nn.MaxPool2d(kernel_size=3, stride=2),
            # 12 -> 12 (three 3x3 convs, pad 1)
            nn.Conv2d(192, 384, kernel_size=(3, 3), padding=1),
            nn.ReLU(True),
            nn.Conv2d(384, 256, kernel_size=(3, 3), padding=1),
            nn.ReLU(True),
            nn.Conv2d(256, 256, kernel_size=(3, 3), padding=1),
            nn.ReLU(True),
            # 12 -> 5, giving 256x5x5
            nn.MaxPool2d(kernel_size=3, stride=2),
        )

        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 5 * 5, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Linear(4096, num_class)
        )

    def forward(self, x):
        """Return class scores of shape (N, num_class) for x of shape (N, 3, 224, 224)."""
        x = self.features(x)
        print(x.size())  # kept from the original post: echoes (N, 256, 5, 5)
        x = x.view(x.size(0), 256 * 5 * 5)
        x = self.classifier(x)
        return x

# Smoke test: one random ImageNet-sized input (1x3x224x224) through an
# untrained 10-class AlexNet; also triggers the print of the feature size.
test_data = torch.rand(1, 3, 224, 224)
model = AlexNet(10)
model(test_data)


torch.Size([1, 256, 5, 5])
tensor([[-0.0044,  0.0114,  0.0032, -0.0099,  0.0035, -0.0024,  0.0103, -0.0194,


VggNet

VggNet是ImageNet 2014年的亚军，总的来说就是它使用了更小的滤波器，用了更深的结构来提升深度学习的效果，从图里面可以看出来这一点，它没有使用11*11这么大的滤波器，取而代之的使用的都是3*3这种小的滤波器，它之所以使用很多小的滤波器，是因为层叠很多小的滤波器的感受野和一个大的滤波器的感受野是相同的，还能减少参数。

import torch.nn as nn

class VGG(nn.Module):
    """VGG-16 classifier for 3x224x224 inputs.

    NOTE(review): the original post lost all Conv2d lines; the surviving
    ReLU/MaxPool pattern (2, 2, 3, 3, 3 ReLUs between pools) matches the
    VGG-16 configuration 64-64-M-128-128-M-256x3-M-512x3-M-512x3-M, which
    is reconstructed here. The original also called
    ``self._initialize_weights()`` without defining it (AttributeError);
    the method is added below.

    Args:
        num_class: number of output classes.
    """

    def __init__(self, num_class):
        super(VGG, self).__init__()

        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),       # 224 -> 112
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),       # 112 -> 56
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),       # 56 -> 28
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),       # 28 -> 14
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2),       # 14 -> 7, so 512*7*7
        )
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096), nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_class),
        )
        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for convs, small-normal init for linears (torchvision style)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return class scores of shape (N, num_class) for x of shape (N, 3, 224, 224)."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x



使用卷积神经网络实现对MNIST数据集的预测

import matplotlib.pyplot as plt
import torch.utils.data
import torchvision.datasets
import os
import torch.nn as nn
from torchvision import transforms

class CNN(nn.Module):
    """Four-stage convolutional classifier for 1x28x28 MNIST images.

    Each stage is Conv(3x3, no pad) -> BatchNorm -> ReLU, with 2x2 max
    pooling after stages 2 and 4; spatial size shrinks
    28 -> 26 -> 24 -> 12 -> 10 -> 8 -> 4, so the head sees 128*4*4
    features and emits 10 class scores.
    """

    def __init__(self):
        super(CNN, self).__init__()

        def _stage(in_ch, out_ch, pooled):
            # One conv stage; `pooled` appends the 2x2/stride-2 max pool.
            mods = [
                nn.Conv2d(in_ch, out_ch, kernel_size=(3, 3)),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
            ]
            if pooled:
                mods.append(nn.MaxPool2d(kernel_size=2, stride=2))
            return nn.Sequential(*mods)

        self.layer1 = _stage(1, 16, pooled=False)
        self.layer2 = _stage(16, 32, pooled=True)
        self.layer3 = _stage(32, 64, pooled=False)
        self.layer4 = _stage(64, 128, pooled=True)

        self.fc = nn.Sequential(
            nn.Linear(128 * 4 * 4, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 128),
            nn.Linear(128, 10)
        )

    def forward(self, x):
        """Return class scores of shape (N, 10) for x of shape (N, 1, 28, 28)."""
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        flat = x.view(x.size(0), -1)
        return self.fc(flat)

# Allow duplicate OpenMP runtimes to coexist in one process — torch and
# matplotlib can each load their own libiomp on Windows, which otherwise
# aborts the interpreter.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Preprocessing pipeline: PIL image -> float tensor in [0, 1], then
# normalize the single channel to [-1, 1] (mean 0.5, std 0.5).
data_tf = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])]
)

# --- Data, model, optimizer and training loop ---------------------------
# NOTE(review): several lines of the original post were truncated by the
# blog scrape: both MNIST constructor calls were cut mid-argument, the two
# DataLoader calls survived only as orphaned `dataset=...` fragments, the
# optimizer was used but never defined, the inner batch loops were lost
# (leaving an orphaned for-`else`), and loss1/loss2 were indexed but never
# appended. Everything is reconstructed below to match the printed output
# (per-epoch train/test accuracy around 0.96 / 0.98).
train_dataset = torchvision.datasets.MNIST(root='F:/机器学习/pytorch/书/data/mnist', train=True,
                                           transform=data_tf, download=True)

test_dataset = torchvision.datasets.MNIST(root='F:/机器学习/pytorch/书/data/mnist', train=False,
                                          transform=data_tf, download=True)

batch_size = 100
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True
)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False
)

model = CNN()
model = model.cuda()
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
# Reconstructed: an optimizer is required by the loop below. Adam with
# default hyperparameters is consistent with the reported first-epoch
# accuracy — TODO confirm against the original source.
optimizer = torch.optim.Adam(model.parameters())

# To save time, three epochs are enough.
iter_step = 3
loss1 = []  # per-epoch TRAIN accuracy (names kept from the original post)
loss2 = []  # per-epoch TEST accuracy
for step in range(iter_step):
    loss1_count = 0
    loss2_count = 0
    for images, labels in train_loader:
        images = images.cuda()
        labels = labels.cuda()
        images = images.reshape(-1, 1, 28, 28)
        output = model(images)
        pred = output.squeeze()

        optimizer.zero_grad()
        loss = criterion(pred, labels)
        loss.backward()
        optimizer.step()

        _, pred = torch.max(pred, 1)
        # Per-batch accuracy: batch_size is 100, so /100 normalizes.
        loss1_count += int(torch.sum(pred == labels)) / 100
    # 测试 — evaluate after each epoch (for-else: runs when the loop
    # finishes without `break`, as in the original post).
    else:
        test_loss = 0
        accuracy = 0
        with torch.no_grad():
            for images, labels in test_loader:
                images = images.cuda()
                labels = labels.cuda()
                pred = model(images.reshape(-1, 1, 28, 28))
                _, pred = torch.max(pred, 1)
                loss2_count += int(torch.sum(pred == labels)) / 100
        # Average the per-batch accuracies over the epoch.
        loss1.append(loss1_count / len(train_loader))
        loss2.append(loss2_count / len(test_loader))

    print(f'第{step}次训练：训练准确率：{loss1[len(loss1)-1]}，测试准确率：{loss2[len(loss2)-1]}')

# Fixed labels: these curves are accuracies, not losses.
plt.plot(loss1, label='Training accuracy')
plt.plot(loss2, label='Validation accuracy')
plt.legend()


第0次训练：训练准确率：0.9646166666666718，测试准确率：0.9868999999999996

<matplotlib.legend.Legend at 0x21f03092fd0>


posted @ 2021-12-03 23:15  [X_O]  阅读(12)  评论(0编辑  收藏  举报