Classic Convolutional Neural Networks and Their PyTorch Implementations


1.LeNet

  LeNet usually refers to LeNet-5, the first convolutional neural network successfully applied to handwritten digit recognition; on the MNIST dataset it reaches about 99.2% accuracy. LeNet-5 has 7 layers in total: two convolutional layers, two pooling layers, two fully connected layers, and one output layer.

 

import torch
import torch.nn as nn
import torch.nn.functional as F

# Square kernel with equal stride in both dimensions
m1=nn.Conv2d(16,33,3,stride=2)
# Non-square kernel with unequal stride and padding
m2=nn.Conv2d(16,33,(3,5),stride=(2,1),padding=(4,2))
# Non-square kernel with unequal stride, padding and dilation
m3=nn.Conv2d(16,33,(3,5),stride=(2,1),padding=(4,2),dilation=(3,1))
input=torch.randn(20,16,50,100)
output=m2(input)
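# Sanity check of the output shape: with m2's (3,5) kernel, stride (2,1) and padding (4,2),
# H_out = (50+2*4-3)//2+1 = 28 and W_out = (100+2*2-5)//1+1 = 100.
print(output.size())   # torch.Size([20, 33, 28, 100])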
#### PyTorch implementation of LeNet
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet,self).__init__()
        self.conv1=nn.Conv2d(3,6,5)
        self.conv2=nn.Conv2d(6,16,5)
        self.fc1=nn.Linear(16*5*5,120)
        self.fc2=nn.Linear(120,84)
        self.fc3=nn.Linear(84,10)
    def forward(self,x):
        out=F.relu(self.conv1(x))
        out=F.max_pool2d(out,2)
        out=F.relu(self.conv2(out))
        out=F.max_pool2d(out,2)
        # This flattening step normally appears in a model's forward() right before the classifier.
        # The classifier here is a plain stack of nn.Linear() layers, so the multi-dimensional
        # feature map must first be flattened into a 2-D tensor.
        # In x = x.view(batch_size, -1), batch_size gives the number of rows after the reshape,
        # and -1 lets PyTorch infer the number of columns from the remaining elements.
        out=out.view(out.size(0),-1)
        out=F.relu(self.fc1(out))
        out=F.relu(self.fc2(out))
        out=self.fc3(out)
        return out
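
  The two 5*5 convolutions and two 2*2 max-poolings shrink a 3*32*32 input (CIFAR-10 sized, as the 3 input channels suggest) down to 16*5*5, which is where the 16*5*5 in the first Linear layer comes from. A minimal smoke test, assuming the LeNet class defined above:

net=LeNet()
x=torch.randn(1,3,32,32)   # a batch of one 3-channel 32x32 image
y=net(x)
print(y.size())            # torch.Size([1, 10])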

  2.AlexNet

  AlexNet has a deeper network structure: it stacks convolutional layers, adds Dropout and data augmentation, replaces the earlier sigmoid activation with ReLU, and was trained on multiple GPUs.

  AlexNet has 8 layers in total: the first 5 are convolutional layers and the last 3 are fully connected layers.

##### PyTorch implementation of AlexNet
class AlexNet(nn.Module):
    def __init__(self,num_classes):
        super(AlexNet,self).__init__()
        self.features=nn.Sequential(
            nn.Conv2d(3,96,kernel_size=11,stride=4,padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3,stride=2),
            nn.Conv2d(96,256,kernel_size=5,padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3,stride=2),
            nn.Conv2d(256,384,kernel_size=3,padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384,384,kernel_size=3,padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384,256,kernel_size=3,padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3,stride=2),
        )
        self.classifier=nn.Sequential(
            nn.Dropout(),
            nn.Linear(256*6*6,4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096,4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096,num_classes),
        )
    def forward(self,x):
        x=self.features(x)
        x=x.view(x.size(0),256*6*6)
        x=self.classifier(x)
        return x
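
  The stride-4 first convolution and the three max-pooling layers reduce a standard 3*224*224 ImageNet input to a 256*6*6 feature map, which matches the first Linear layer. A minimal smoke test, assuming the AlexNet class defined above:

net=AlexNet(num_classes=1000)
x=torch.randn(1,3,224,224)
y=net(x)
print(y.size())   # torch.Size([1, 1000])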

  3.VGGNet

  VGGNet replaces the larger kernels of AlexNet with stacks of 3*3 convolutions: two stacked 3*3 convolutions cover the same receptive field as a single 5*5 one (and three cover a 7*7), with fewer parameters and more non-linearities. The model is built simply by stacking convolutional and pooling layers.
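
  As a rough illustration of the parameter savings, here is a minimal sketch: for C input and C output channels, one 7*7 convolution has 49*C*C weights, while three stacked 3*3 convolutions have only 27*C*C:

import torch.nn as nn

C=512
big=nn.Conv2d(C,C,kernel_size=7,padding=3,bias=False)
small=nn.Sequential(*[nn.Conv2d(C,C,kernel_size=3,padding=1,bias=False) for _ in range(3)])
print(sum(p.numel() for p in big.parameters()))    # 49*512*512 = 12845056
print(sum(p.numel() for p in small.parameters()))  # 3*9*512*512 = 7077888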

#### Implementation of VGGNet
cfg={
    'VGG11':[64,'M',128,'M',256,256,'M',512,512,'M',512,512,'M'],
    'VGG13':[64,64,'M',128,128,'M',256,256,'M',512,512,'M',512,512,'M'],
    'VGG16':[64,64,'M',128,128,'M',256,256,256,'M',512,512,512,'M',512,512,512,'M'],
    'VGG19':[64,64,'M',128,128,'M',256,256,256,256,'M',512,512,512,512,'M',512,512,512,512,'M'],
}
class VGG(nn.Module):
    def __init__(self,vgg_name):
        super(VGG,self).__init__()
        self.features=self._make_layers(cfg[vgg_name])
        self.classifier=nn.Linear(512,10)
    def forward(self,x):
        out=self.features(x)
        out=out.view(out.size(0),-1)
        out=self.classifier(out)
        return out
    def _make_layers(self,cfg):
        layers=[]
        in_channels=3
        for x in cfg:
            if x =='M':
                layers+=[nn.MaxPool2d(kernel_size=2,stride=2)]
            else:
                layers+=[nn.Conv2d(in_channels,x,kernel_size=3,padding=1),nn.BatchNorm2d(x),nn.ReLU(inplace=True)]
                in_channels=x
        layers+=[nn.AvgPool2d(kernel_size=1,stride=1)]
        return nn.Sequential(*layers)
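
  The five 'M' entries in each config pool a 3*32*32 CIFAR-10 image down to a 512*1*1 feature map, so the single nn.Linear(512,10) classifier is enough. A minimal smoke test, assuming the VGG class defined above:

net=VGG('VGG16')
x=torch.randn(1,3,32,32)
y=net(x)
print(y.size())   # torch.Size([1, 10])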

  4.GoogLeNet

'''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F

# Convolution + BatchNorm + ReLU building block
class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x)

# Inception module
class Inception(nn.Module):
    def __init__(self, in_planes,
                 n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()
        # 1x1 conv branch
        self.b1 = BasicConv2d(in_planes, n1x1, kernel_size=1)

        # 1x1 conv -> 3x3 conv branch
        self.b2_1x1_a = BasicConv2d(in_planes, n3x3red, 
                                    kernel_size=1)
        self.b2_3x3_b = BasicConv2d(n3x3red, n3x3, 
                                    kernel_size=3, padding=1)

        # 1x1 conv -> 3x3 conv -> 3x3 conv branch
        self.b3_1x1_a = BasicConv2d(in_planes, n5x5red, 
                                    kernel_size=1)
        self.b3_3x3_b = BasicConv2d(n5x5red, n5x5, 
                                    kernel_size=3, padding=1)
        self.b3_3x3_c = BasicConv2d(n5x5, n5x5, 
                                    kernel_size=3, padding=1)

        # 3x3 pool -> 1x1 conv branch
        self.b4_pool = nn.MaxPool2d(3, stride=1, padding=1)
        self.b4_1x1 = BasicConv2d(in_planes, pool_planes, 
                                  kernel_size=1)

    def forward(self, x):
        y1 = self.b1(x)
        y2 = self.b2_3x3_b(self.b2_1x1_a(x))
        y3 = self.b3_3x3_c(self.b3_3x3_b(self.b3_1x1_a(x)))
        y4 = self.b4_1x1(self.b4_pool(x))
        # Each y_i has shape [batch_size, branch_channels, H_out, W_out]
        # Concatenate the feature maps of the four branches along the channel dimension
        return torch.cat([y1, y2, y3, y4], 1)


class GoogLeNet(nn.Module):
    def __init__(self):
        super(GoogLeNet, self).__init__()
        self.pre_layers = BasicConv2d(3, 192, 
                                      kernel_size=3, padding=1)

        self.a3 = Inception(192,  64,  96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
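        # Each Inception block outputs n1x1 + n3x3 + n5x5 + pool_planes channels:
        # a3 gives 64+128+32+32 = 256 (b3's in_planes) and b3 gives 128+192+96+64 = 480 (a4's in_planes).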

        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192,  96, 208, 16,  48,  64)
        self.b4 = Inception(512, 160, 112, 224, 24,  64,  64)
        self.c4 = Inception(512, 128, 128, 256, 24,  64,  64)
        self.d4 = Inception(512, 112, 144, 288, 32,  64,  64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out


def test():
    net = GoogLeNet()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y.size())

test()

  

