Implementing CIFAR-10 classification with PyTorch

I wrote this program mainly to get familiar with PyTorch's custom Dataset mechanism, so rather than using the CIFAR-10 that ships with torchvision, I downloaded the raw images directly (download: https://pjreddie.com/media/files/cifar.tgz).
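
For completeness, here is a minimal sketch of fetching and unpacking that archive with only the standard library; it should leave the PNGs under ./cifar/train and ./cifar/test, which is the layout the rest of the post assumes:

# One-time download and extraction of the raw CIFAR-10 PNGs.
import tarfile
import urllib.request

urllib.request.urlretrieve('https://pjreddie.com/media/files/cifar.tgz', 'cifar.tgz')
with tarfile.open('cifar.tgz') as tar:
    tar.extractall('.')  # should yield ./cifar/train and ./cifar/test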

The Dataset implementation is also simple. My personal preference is to store all the image paths and all the image labels in two separate .npy files; the two numpy arrays are then passed straight into the Dataset.
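
Concretely, the caching convention looks like this (the file names below are just illustrative). np.save stores the paths as a fixed-width unicode array, so the loaded entries can be handed straight to Image.open:

import numpy as np

# Save: one array of image paths, one parallel array of integer labels.
np.save('train_images_list.npy', np.array(['./cifar/train/1_frog.png',
                                           './cifar/train/2_truck.png']))
np.save('train_images_labels_list.npy', np.array([6, 9], dtype='int64'))

# Load: the two arrays come back index-aligned, ready for the Dataset.
paths = np.load('train_images_list.npy')
labels = np.load('train_images_labels_list.npy')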

The complete program is straightforward; a single .py file is all it takes:

# -*- coding: utf-8 -*-
import os, torch, glob, time, copy
import numpy as np
from PIL import Image  
from torchvision import models, transforms
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torch.optim as optim
from torch.optim import lr_scheduler

def get_label(path):
    # Map a CIFAR-10 file name such as '1234_airplane.png' to its class index.
    names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
             'dog', 'frog', 'horse', 'ship', 'truck']
    for idx, name in enumerate(names):
        if name in path:
            return idx
    
class myDataset(Dataset):
    # A custom Dataset implements __init__, __getitem__ and __len__:
    # it wraps the parallel (path, label) arrays so that samples can be
    # fetched by index and len() reports the dataset size.


    def __init__(self, files_list, labels_list, transform):
        self.files_list = files_list
        self.labels_list = labels_list
        self.transform = transform

    def __getitem__(self, index):
        # Load one image from disk and run it through the transform pipeline;
        # convert('RGB') guards against grayscale/palette PNGs.
        img = Image.open(self.files_list[index]).convert('RGB')
        img = self.transform(img)
        return img, self.labels_list[index]

    def __len__(self):
        return len(self.labels_list)
    
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    # dataloaders, dataset_sizes and use_gpu are globals defined in __main__ below.

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        epoch_start = time.time()
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # training mode: BatchNorm/Dropout active
            else:
                model.eval()   # evaluation mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for step, (inputs, labels) in enumerate(dataloaders[phase]):
                # move the batch to the GPU when one is available
                if use_gpu:
                    inputs = inputs.cuda()
                    labels = labels.cuda()

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward; track gradients only in the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                # backward + optimize only in the training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                    if step % 100 == 0:
                        print(step, loss.item())


                # statistics, accumulated as plain Python numbers
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels).item()

            # Step the LR schedule once per epoch, after the optimizer updates
            # (the required ordering in modern PyTorch).
            if phase == 'train':
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                torch.save(best_model_wts, 'model_ft.pth')
                
        print('epoch time: {:.1f}s'.format(time.time() - epoch_start))

    # Reload the best weights seen on the validation set and return the model.
    model.load_state_dict(best_model_wts)
    return model

    
if __name__ == '__main__':    
        
#    One-time preprocessing: walk the extracted PNG folders, derive each label
#    from its file name, and cache the path/label arrays as .npy files.
#    training_set_dir = './cifar/train'
#    test_set_dir = './cifar/test'
#    
#    train_images_list = glob.glob(os.path.join(training_set_dir, '*.' + 'png'))
#    test_images_list = glob.glob(os.path.join(test_set_dir, '*.' + 'png'))   
#    
#    train_images_labels_list = [get_label(x) for x in train_images_list]
#    test_images_labels_list = [get_label(x) for x in test_images_list]
#    
#    np.save('train_images_list.npy', train_images_list)
#    np.save('test_images_list.npy', test_images_list)
#    
#    np.save('train_images_labels_list.npy', train_images_labels_list)
#    np.save('test_images_labels_list.npy', test_images_labels_list)
    
    # Load the cached path/label arrays produced by the block above.
    train_images_list = np.load('train_images_list.npy')
    test_images_list = np.load('test_images_list.npy')
    
    train_images_labels_list = np.load('train_images_labels_list.npy')
    train_images_labels_list = train_images_labels_list.astype('int64')
    
    test_images_labels_list = np.load('test_images_labels_list.npy')
    test_images_labels_list = test_images_labels_list.astype('int64')

    num_classes = 10
    BATCH_SIZE = 128
    num_workers = 2
    use_gpu = torch.cuda.is_available()
    # Upsample the 32x32 CIFAR images to the 224x224 input that ResNet-18
    # expects, and normalize with the usual ImageNet statistics.
    transform = transforms.Compose([
            transforms.Resize((224, 224)),
#            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            ]
    )
    
    train_dataset = myDataset(train_images_list, train_images_labels_list, transform)    
    test_dataset = myDataset(test_images_list, test_images_labels_list, transform)
    
    train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE,  
                               shuffle=True, num_workers=num_workers)  
    test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE,  
                               shuffle=False, num_workers=num_workers)      
    dataloaders = {}
    dataloaders['train'] = train_loader
    dataloaders['val'] = test_loader
    
    dataset_sizes = {}
    dataset_sizes['train'] = len(train_images_labels_list)
    dataset_sizes['val'] = len(test_images_labels_list)
    
    # ResNet-18 trained from scratch; swap the final fc layer for 10 classes.
    model_ft = models.resnet18(pretrained=False)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, num_classes)
    
    if use_gpu:
        model_ft = nn.DataParallel(model_ft)  # multi-GPU friendly; harmless with one GPU
        model_ft = model_ft.cuda()
    
    criterion = nn.CrossEntropyLoss()
    
    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
    
    # Decay LR by a factor of 0.1 every epoch (step_size=1)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=1, gamma=0.1)
    # num_epochs=2 is just a short demonstration run; increase it for real training
    model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                           num_epochs=2)
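
train_model checkpoints the best validation weights to model_ft.pth, so a separate script can reload them for inference. Here is a minimal sketch, reusing the imports and transform from the listing above; the test image path is just a placeholder, and note that training on GPU wraps the model in nn.DataParallel, which prefixes every parameter name with module.:

# Rebuild the network skeleton and load the checkpointed weights.
model = models.resnet18(pretrained=False)
model.fc = nn.Linear(model.fc.in_features, 10)
state = torch.load('model_ft.pth', map_location='cpu')
# Strip the 'module.' prefix added by nn.DataParallel, if present.
state = {k.replace('module.', '', 1): v for k, v in state.items()}
model.load_state_dict(state)
model.eval()

# Classify a single image (hypothetical path).
img = transform(Image.open('./cifar/test/0_cat.png').convert('RGB')).unsqueeze(0)
with torch.no_grad():
    pred = model(img).argmax(dim=1).item()
print('predicted class index:', pred)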

Reposted from blog.csdn.net/qq_32464407/article/details/81124696