PyTorch learning: VGG for CIFAR10 classification (code)

# -*- coding: utf-8 -*-
"""
Created on Tue Sep  4 08:47:56 2018

@author: www
"""

import sys
sys.path.append('...')

import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from torchvision.datasets import CIFAR10

# We can define a vgg block that takes three arguments: the number of conv
# layers, the number of input channels, and the number of output channels.
# The first conv layer takes the image's input channels and produces the
# block's output channels; every following conv layer then takes that output
# channel count as its input.
def vgg_block(num_convs, in_channels, out_channels):
     net = [nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), nn.ReLU(True)]
     
     for i in range(num_convs-1):  # define the remaining conv layers
          net.append(nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1))
          net.append(nn.ReLU(True))
          
     net.append(nn.MaxPool2d(2, 2))  # the pooling layer
     return nn.Sequential(*net)
     
# Print the block to inspect its structure
block_demo = vgg_block(3, 64, 128)
print(block_demo)

# First define an input of shape (1, 64, 300, 300)
input_demo = Variable(torch.zeros(1, 64, 300, 300))
output_demo = block_demo(input_demo)
print(output_demo.shape)
# The output becomes (1, 128, 150, 150): after this one vgg block the spatial size is halved and the channel count becomes 128
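
# A quick check of the size arithmetic: with kernel_size=3 and padding=1 a
# conv layer preserves the spatial size, so only the MaxPool2d(2, 2) halves
# it (conv_out_size below is just an illustrative helper, not part of the model):
def conv_out_size(in_size, kernel, padding=0, stride=1):
     # standard formula: out = floor((in + 2*padding - kernel) / stride) + 1
     return (in_size + 2 * padding - kernel) // stride + 1

print(conv_out_size(300, 3, padding=1))   # 300 -> 300: conv keeps the size
print(conv_out_size(300, 2, stride=2))    # 300 -> 150: the pool halves it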

# Next we define a function that stacks vgg blocks
def vgg_stack(num_convs, channels):
     net = []
     for n, c in zip(num_convs, channels):
          in_c = c[0]
          out_c = c[1]
          net.append(vgg_block(n, in_c, out_c))
     return nn.Sequential(*net)

# As an example, we define a slightly simpler vgg structure with 8 convolutional layers
vgg_net = vgg_stack((1,1,2,2,2), ((3,64), (64,128), (128,256), (256, 512), (512,512)))
print(vgg_net)

# The network contains 5 max-pooling layers, so the spatial size is halved 5 times
# (a factor of 2^5 = 32). We can verify this by feeding in a 256 x 256 image and looking at the result
test_x = Variable(torch.zeros(1, 3, 256, 256))
test_y = vgg_net(test_x)
print(test_y.shape)     
# The image indeed shrinks by a factor of $2^5$; adding a few fully connected layers on top gives the classification output we want
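
# For CIFAR10 the inputs are 32 x 32, and 32 / 2**5 = 1, so the flattened
# feature vector has 512 * 1 * 1 = 512 entries; that is why the first fully
# connected layer below is nn.Linear(512, 1000). A quick check:
cifar_demo = Variable(torch.zeros(1, 3, 32, 32))
print(vgg_net(cifar_demo).view(1, -1).shape)   # torch.Size([1, 512])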
class vgg(nn.Module):
     def __init__(self):
          super(vgg, self).__init__()
          self.feature = vgg_net
          self.fc = nn.Sequential(
               nn.Linear(512, 1000),
               nn.ReLU(True),
               nn.Linear(1000, 10)
          )
          
     def forward(self, x):
          x = self.feature(x)
          x = x.view(x.shape[0], -1)
          x = self.fc(x)
          return x
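
# A quick forward-pass check on a CIFAR10-sized input: the network should
# produce one score per class, i.e. an output of shape (1, 10).
demo_net = vgg()
demo_out = demo_net(Variable(torch.zeros(1, 3, 32, 32)))
print(demo_out.shape)   # torch.Size([1, 10])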
          
# Now we can train the model and see how it does on cifar10
def data_tf(x):
     x = np.array(x, dtype='float32') / 255
     x = (x - 0.5) / 0.5
     x = x.transpose((2,0,1))  # put the channel in the first dimension; this is just the input layout PyTorch requires
     x = torch.from_numpy(x)
     return x
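
# The same preprocessing can also be expressed with torchvision.transforms;
# this is a sketch of an equivalent pipeline (data_tf above is what is
# actually passed to the datasets below):
from torchvision import transforms
data_tf_alt = transforms.Compose([
     transforms.ToTensor(),                       # HWC image -> CHW float tensor in [0, 1]
     transforms.Normalize([0.5] * 3, [0.5] * 3),  # (x - 0.5) / 0.5 per channel
])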

train_set = CIFAR10('./data', train=True, transform=data_tf, download=True)
train_data = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
test_set = CIFAR10('./data', train=False, transform=data_tf, download=True)
test_data = torch.utils.data.DataLoader(test_set, batch_size=128, shuffle=False)

net = vgg()
optimizer = torch.optim.SGD(net.parameters(), lr=1e-1)
criterion = nn.CrossEntropyLoss()

from datetime import datetime

def get_acc(output, label):
     total = output.shape[0]
     _, pred_label = output.max(1)
     num_correct = (pred_label == label).sum().item()
     return num_correct / total

def train(net, train_data, valid_data, num_epochs, optimizer, criterion):
     if torch.cuda.is_available():
          net = net.cuda()
     prev_time = datetime.now()
     for epoch in range(num_epochs):
          train_loss = 0
          train_acc = 0
          net = net.train()
          for im, label in train_data:
               if torch.cuda.is_available():
                    im = Variable(im.cuda())
                    label = Variable(label.cuda())
               else:
                    im = Variable(im)
                    label = Variable(label)
               #forward
               output = net(im)
               loss = criterion(output, label)
               #backward
               optimizer.zero_grad()
               loss.backward()
               optimizer.step()
               
               train_loss += loss.item()
               train_acc += get_acc(output, label)
          cur_time = datetime.now()
          h, remainder = divmod((cur_time-prev_time).seconds, 3600)
          m, s = divmod(remainder, 60)
          time_str = "Time %02d:%02d:%02d" % (h, m, s)
          if valid_data is not None:
               valid_loss = 0
               valid_acc = 0
               net = net.eval()
               for im, label in valid_data:
                    if torch.cuda.is_available():
                         im = Variable(im.cuda())
                         label = Variable(label.cuda())
                    else:
                         im = Variable(im)
                         label = Variable(label)
                    # volatile was removed in PyTorch 0.4; disable autograd with no_grad instead
                    with torch.no_grad():
                         output = net(im)
                         loss = criterion(output, label)
                    valid_loss += loss.item()
                    valid_acc += get_acc(output, label)
               epoch_str = (
                "Epoch %d. Train Loss: %f, Train Acc: %f, Valid Loss: %f, Valid Acc: %f, "
                % (epoch, train_loss / len(train_data),
                   train_acc / len(train_data), valid_loss / len(valid_data),
                   valid_acc / len(valid_data)))
          else:
               epoch_str = ("Epoch %d. Train Loss: %f, Train Acc: %f, " %
                         (epoch, train_loss / len(train_data),
                          train_acc / len(train_data)))
               
          prev_time = cur_time
          print(epoch_str + time_str)
               
                                                  
train(net, train_data, test_data, 20, optimizer, criterion)
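
# After training, the weights could be saved for later reuse (the file name
# 'vgg_cifar10.pth' is an arbitrary choice):
torch.save(net.state_dict(), 'vgg_cifar10.pth')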