Implementing CIFAR-10 classification with PyTorch

0 Imports

import torch
import torch.nn as nn
import torch.nn.functional as f
from torchvision.datasets import cifar
from torchvision import transforms
from torch.utils.data import DataLoader

1 Define the network

class Net(nn.Module):
    """网络结构"""
    def __init__(self):
        """
        初始化网络结构和损失函数
        """
        super().__init__()
        # out_size = (in_size - k + 2p) / s + 1
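        # e.g. conv1 on a 32x32 CIFAR-10 image: (32 - 3 + 2*1) / 1 + 1 = 32, so padding=1 keeps the spatial size;
        # each 2x2 max-pool in forward() then halves it: 32 -> 16 -> 8, hence the 64 * 8 * 8 features fed to fc1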
        self.conv1 = nn.Conv2d(3, 32, (3, 3), (1, 1), 1)
        self.conv2 = nn.Conv2d(32, 64, (3, 3), (1, 1), 1)
        self.fc1 = nn.Linear(64 * 8 * 8, 64 * 8)
        self.fc2 = nn.Linear(64 * 8, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        """Forward pass."""
        x = f.max_pool2d(f.relu(self.conv1(x)), (2, 2))
        x = f.max_pool2d(f.relu(self.conv2(x)), (2, 2))
        x = x.view(-1, 64 * 8 * 8)
        x = f.relu(self.fc1(x))
        x = f.relu(self.fc2(x))
        # return raw logits: nn.CrossEntropyLoss applies softmax internally
        x = self.fc3(x)
        return x
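
As a quick sanity check of the output-size formula above, here is a minimal sketch (not part of the original post) that pushes a fake batch through the network:

net = Net()
dummy = torch.randn(4, 3, 32, 32)   # a fake batch of 4 CIFAR-sized images
out = net(dummy)
print(out.shape)                    # torch.Size([4, 10]): one logit per class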

2 Data processing

  1. Initialize the parameters: network, learning rate, batch size, data path, loss function and optimizer.
  2. Apply data augmentation, which enlarges the effective dataset and improves how well the model fits (see the sketch after this list).
  3. Feed the data as mini-batches to speed up convergence; the batch size is usually a power of two, e.g. 2^4, 2^6 or 2^8.
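
The transform pipeline in the Model class below only center-crops (a no-op on 32x32 CIFAR-10 images) and normalizes, so it does not actually enlarge the data. A hedged sketch of a training transform that does augment, using standard torchvision transforms (the padding value of 4 is an assumption, not from the original post):

train_transforms = transforms.Compose([
    transforms.RandomCrop(32, padding=4),   # random 32x32 crop from a zero-padded 40x40 image
    transforms.RandomHorizontalFlip(),      # flip left/right with probability 0.5
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
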
class Model:
    """处理数据、训练、评估"""
    def __init__(self, net, learning_rate=0.001, batch_size=128, path='./data/'):
        """
        初始化网络结构、参数、数据存储路径
        :param net: 网络结构
        :param learning_rate: 学习率
        :param batch_size: 批量大小
        :param path: 数据存储路径
        """
        self.net = net
        self.cost = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.RMSprop(self.net.parameters(), lr=learning_rate)
        self.batch_size = batch_size
        self.path = path

        # Data preprocessing (CenterCrop and Normalize only preprocess; they do not augment)
        self.transforms = transforms.Compose([
            transforms.CenterCrop([32, 32]),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map [0, 1] pixels to [-1, 1]
        ])

        # Load the data
        self.train = cifar.CIFAR10(root=self.path, train=True, transform=self.transforms, download=True)
        self.train_loader = DataLoader(dataset=self.train, batch_size=self.batch_size, shuffle=True)
        self.test = cifar.CIFAR10(root=self.path, train=False, transform=self.transforms)
        self.test_loader = DataLoader(dataset=self.test, batch_size=self.batch_size, shuffle=True)
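
A minimal sketch (not in the original post) to confirm what one mini-batch looks like after this pipeline: image tensors of shape [batch_size, 3, 32, 32] with pixel values mapped to roughly [-1, 1] by Normalize, and integer class labels between 0 and 9:

model = Model(Net())
images, labels = next(iter(model.train_loader))
print(images.shape)                # torch.Size([128, 3, 32, 32])
print(images.min(), images.max())  # close to -1.0 and 1.0 after Normalize
print(labels[:8])                  # integer class ids between 0 and 9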

3 Training

Inside the Model class:

Training iterates over mini-batches; the number of epochs can be configured.

  1. Set the number of epochs.
  2. Split each batch into training data and labels.
  3. Clear the accumulated gradients with zero_grad().
  4. Run the forward pass and back-propagate with backward().
  5. Update the parameters with optimizer.step().
def train_(self, epochs=3):
    for epoch in range(epochs):
        losses = 0.0
        for i, data in enumerate(self.train_loader, 0):
            train_data, train_label = data
            self.optimizer.zero_grad()
            loss = self.cost(self.net(train_data), train_label)
            loss.backward()
            self.optimizer.step()
            losses += loss.item()
            if i % 10 == 0 and i:
                print(f'[epoch {epoch + 1}/{epochs}, {(i + 1) / len(self.train_loader) * 100:.2f}%] loss: {losses / 10:.2f}')
                losses = 0.0
    print('Train Finished!')

4 Evaluation

Inside the Model class:

Evaluate the model on the test set; the tick_or_cross helper computes the fraction of correct predictions in a batch.

@staticmethod
def tick_or_cross(predictions, labels):
    # predictions: raw network outputs of shape [batch, num_classes]
    pred = torch.max(predictions.data, 1)[1]            # predicted class = index of the largest value
    rights = pred.eq(labels.data.view_as(pred)).sum()   # number of correct predictions in the batch
    return rights.item() / len(pred)

def evaluate(self, data_loader):
    accuracy = 0
    with torch.no_grad():
        for data in data_loader:
            d, label = data
            # pass the raw network outputs; tick_or_cross takes the argmax itself
            accuracy += self.tick_or_cross(self.net(d), label)
    print(f"accuracy: {accuracy / len(data_loader) * 100:.4f}%")
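
For concreteness, a tiny hand-made example (not from the original post) of what tick_or_cross computes: with the fake logits below, rows 0 and 2 are predicted correctly, so the batch accuracy is 2/3:

logits = torch.tensor([[0.1, 2.0, 0.3],    # argmax = 1
                       [1.5, 0.2, 0.1],    # argmax = 0
                       [0.0, 0.1, 3.0]])   # argmax = 2
labels = torch.tensor([1, 2, 2])           # middle row is wrong
print(Model.tick_or_cross(logits, labels)) # 0.666...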

5 Complete code

import torch
import torch.nn as nn
import torch.nn.functional as f
from torchvision.datasets import cifar
from torchvision import transforms
from torch.utils.data import DataLoader


class Net(nn.Module):
    """网络结构"""
    def __init__(self):
        """
        初始化网络结构和损失函数
        """
        super().__init__()
        # out_size = (in_size - k + 2p) / s + 1
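        # e.g. conv1 on a 32x32 CIFAR-10 image: (32 - 3 + 2*1) / 1 + 1 = 32, so padding=1 keeps the spatial size;
        # each 2x2 max-pool in forward() then halves it: 32 -> 16 -> 8, hence the 64 * 8 * 8 features fed to fc1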
        self.conv1 = nn.Conv2d(3, 32, (3, 3), (1, 1), 1)
        self.conv2 = nn.Conv2d(32, 64, (3, 3), (1, 1), 1)
        self.fc1 = nn.Linear(64 * 8 * 8, 64 * 8)
        self.fc2 = nn.Linear(64 * 8, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        """Forward pass."""
        x = f.max_pool2d(f.relu(self.conv1(x)), (2, 2))
        x = f.max_pool2d(f.relu(self.conv2(x)), (2, 2))
        x = x.view(-1, 64 * 8 * 8)
        x = f.relu(self.fc1(x))
        x = f.relu(self.fc2(x))
        # return raw logits: nn.CrossEntropyLoss applies softmax internally
        x = self.fc3(x)
        return x


class Model:
    """处理数据、训练、评估"""
    def __init__(self, net, learning_rate=0.001, batch_size=128, path='./data/'):
        """
        初始化网络结构、参数、数据存储路径
        :param net: 网络结构
        :param learning_rate: 学习率
        :param batch_size: 批量大小
        :param path: 数据存储路径
        """
        self.net = net
        self.cost = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.RMSprop(self.net.parameters(), lr=learning_rate)
        self.batch_size = batch_size
        self.path = path

        # Data preprocessing (CenterCrop and Normalize only preprocess; they do not augment)
        self.transforms = transforms.Compose([
            transforms.CenterCrop([32, 32]),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map [0, 1] pixels to [-1, 1]
        ])

        # Load the data
        self.train = cifar.CIFAR10(root=self.path, train=True, transform=self.transforms, download=True)
        self.train_loader = DataLoader(dataset=self.train, batch_size=self.batch_size, shuffle=True)
        self.test = cifar.CIFAR10(root=self.path, train=False, transform=self.transforms)
        self.test_loader = DataLoader(dataset=self.test, batch_size=self.batch_size, shuffle=True)

    @staticmethod
    def tick_or_cross(predictions, labels):
        # predictions: raw network outputs of shape [batch, num_classes]
        pred = torch.max(predictions.data, 1)[1]            # predicted class = index of the largest value
        rights = pred.eq(labels.data.view_as(pred)).sum()   # number of correct predictions in the batch
        return rights.item() / len(pred)

    def train_(self, epochs=3):
        for epoch in range(epochs):
            losses = 0.0
            for i, data in enumerate(self.train_loader, 0):
                train_data, train_label = data
                self.optimizer.zero_grad()
                loss = self.cost(self.net(train_data), train_label)
                loss.backward()
                self.optimizer.step()
                losses += loss.item()
                if i % 10 == 0 and i:
                    print(f'[epoch {epoch + 1}/{epochs}, {(i + 1) / len(self.train_loader) * 100:.2f}%] loss: {losses / 10:.2f}')
                    losses = 0.0
        print('Train Finished!')

    def evaluate(self, data_loader):
        accuracy = 0
        with torch.no_grad():
            for data in data_loader:
                d, label = data
                # pass the raw network outputs; tick_or_cross takes the argmax itself
                accuracy += self.tick_or_cross(self.net(d), label)
        print(f"accuracy: {accuracy / len(data_loader) * 100:.4f}%")


if __name__ == '__main__':
    _net = Net()
    model = Model(_net)
    model.train_(10)
    model.evaluate(model.test_loader)


Reposted from blog.csdn.net/Zeus_daifu/article/details/128275952