PyTorch: Convolutional Neural Networks

Sizing the network

  • Input layer

As a rule of thumb, the input size should be divisible by 2 many times; 32, 64, 96, and 224 are common choices.

  • Convolutional layers

Prefer small filters, e.g. 3×3 with a stride of 1, and zero-pad the input volume so the output keeps the same spatial size as the input (see the shape check after this list).

  • Pooling layers

Spatially downsample the input volume.

  • Zero padding

Without it, information at the image borders is lost too quickly.
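
As a quick check of these rules, the snippet below (a sketch; the 3×32×32 input matches the CIFAR-10-sized shape assumed by the network later in this post) shows that a 3×3 convolution with stride 1 and padding 1 preserves spatial size, while a 2×2 max pool halves it.

import torch
from torch import nn

x = torch.randn(1, 3, 32, 32)  # one 3x32x32 image
conv = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
pool = nn.MaxPool2d(kernel_size=2, stride=2)

print(conv(x).shape)        # torch.Size([1, 32, 32, 32]) -- spatial size preserved
print(pool(conv(x)).shape)  # torch.Size([1, 32, 16, 16]) -- halved by pooling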

"""没有人能在一开始就想清楚,只有开始做了,你才会越来越清楚——马克·扎克伯格"""
"""一个简单的卷积神经网络"""
from torch import nn


class SimpleCNN(nn.Module):
    def __init__(self):
        super(SimpleCNN, self).__init__()

        # First block: conv -> ReLU -> max pool
        layer1 = nn.Sequential()
        layer1.add_module('conv1', nn.Conv2d(3, 32, 3, 1, padding=1))  # b, 32, 32, 32
        layer1.add_module('relu1', nn.ReLU(True))
        layer1.add_module('pool1', nn.MaxPool2d(2, 2))  # b, 32, 16, 16
        self.layer1 = layer1

        # Second block
        layer2 = nn.Sequential()
        layer2.add_module('conv2', nn.Conv2d(32, 64, 3, 1, padding=1))  # b, 64, 16, 16
        layer2.add_module('relu2', nn.ReLU(True))
        layer2.add_module('pool2', nn.MaxPool2d(2, 2))  # b, 64, 8, 8
        self.layer2 = layer2

        # Third block
        layer3 = nn.Sequential()
        layer3.add_module('conv3', nn.Conv2d(64, 128, 3, 1, padding=1))  # b, 128, 8, 8
        layer3.add_module('relu3', nn.ReLU(True))
        layer3.add_module('pool3', nn.MaxPool2d(2, 2))  # b, 128, 4, 4
        self.layer3 = layer3

        # Fourth block: the fully connected classifier
        layer4 = nn.Sequential()
        layer4.add_module('fc1', nn.Linear(2048, 512))  # 2048 = 128 * 4 * 4
        layer4.add_module('fc_relu1', nn.ReLU(True))
        layer4.add_module('fc2', nn.Linear(512, 64))
        layer4.add_module('fc_relu2', nn.ReLU(True))
        layer4.add_module('fc3', nn.Linear(64, 10))
        self.layer4 = layer4

    def forward(self, x):
        conv1 = self.layer1(x)
        conv2 = self.layer2(conv1)
        conv3 = self.layer3(conv2)
        # Flatten the (b, 128, 4, 4) feature maps into (b, 2048) for the classifier
        fc_input = conv3.view(conv3.size(0), -1)
        fc_output = self.layer4(fc_input)
        return fc_output, conv1, conv2, conv3

model = SimpleCNN()
print(model)
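
# ---------- Sanity check -----------------------------------------------
# A minimal forward-pass sketch: assumes CIFAR-10-sized 3x32x32 inputs
# (as in the shape comments above); the batch size of 4 is arbitrary.
import torch

x = torch.randn(4, 3, 32, 32)
out, c1, c2, c3 = model(x)
print(out.shape)                     # torch.Size([4, 10])
print(c1.shape, c2.shape, c3.shape)  # (4, 32, 16, 16), (4, 64, 8, 8), (4, 128, 4, 4)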

# ---------- Extracting parameters ---------------------------------------
"""named_parameters() yields an iterator of (layer name, parameter) pairs;
   parameters() yields all of the network's parameters"""

for param in model.named_parameters():
    print(param[0])  # e.g. 'layer1.conv1.weight'

SimpleCNN(
  (layer1): Sequential(
    (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu1): ReLU(inplace=True)
    (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (layer2): Sequential(
    (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu2): ReLU(inplace=True)
    (pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (layer3): Sequential(
    (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu3): ReLU(inplace=True)
    (pool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (layer4): Sequential(
    (fc1): Linear(in_features=2048, out_features=512, bias=True)
    (fc_relu1): ReLU(inplace=True)
    (fc2): Linear(in_features=512, out_features=64, bias=True)
    (fc_relu2): ReLU(inplace=True)
    (fc3): Linear(in_features=64, out_features=10, bias=True)
  )
)

layer1.conv1.weight
layer1.conv1.bias
layer2.conv2.weight
layer2.conv2.bias
layer3.conv3.weight
layer3.conv3.bias
layer4.fc1.weight
layer4.fc1.bias
layer4.fc2.weight
layer4.fc2.bias
layer4.fc3.weight
layer4.fc3.bias
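
Since parameters() returns every learnable tensor in a single iterator, it is what you would typically pass to an optimizer. A minimal sketch (the choice of SGD and the learning rate here are illustrative assumptions, not prescribed by the model):

from torch import optim

optimizer = optim.SGD(model.parameters(), lr=0.01)     # illustrative optimizer and learning rate
num_params = sum(p.numel() for p in model.parameters())
print(num_params)                                      # total number of learnable scalars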


Reposted from blog.csdn.net/weixin_44478378/article/details/104756358