PyTorch: building a custom dataset and loading it for training

import os
import numpy as np
import random
import cv2

import torch
import torch.nn as nn
import torch.utils.data as dataf
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim


# Each subfolder of train_image_path is named with a numeric class label
# and contains the images for that class.
train_image_path = r"F:\test\image"
signal = os.listdir(train_image_path)
all_path = []
data_path = train_image_path
for fsignal in signal:
    filepath = data_path + "/" + fsignal
    filename = os.listdir(filepath)
    for fname in filename:
        ffpath = filepath + "/" + fname
        # store [class label, full image path]
        path = [fsignal, ffpath]
        all_path.append(path)

count = len(all_path)
data_x = np.empty((count, 1, 28, 28), dtype="float32")
data_y = []
random.shuffle(all_path)
i = 0
for item in all_path:
    # read each image as grayscale and resize it to 28x28
    img = cv2.imread(item[1], 0)
    img = cv2.resize(img, (28, 28))
    arr = np.asarray(img, dtype="float32")
    data_x[i, :, :, :] = arr
    i += 1
    data_y.append(int(item[0]))

data_x = data_x / 255
data_y = np.asarray(data_y)
data_x = torch.from_numpy(data_x)
data_y = torch.from_numpy(data_y).long()  # labels must be LongTensor for the loss

# build the dataset
dataset = dataf.TensorDataset(data_x, data_y)
# wrap it in a DataLoader for training
batchsize = 3  # must be defined before the DataLoader is created
loader = dataf.DataLoader(dataset, batch_size=batchsize, shuffle=True)
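
Before training, it is worth pulling one batch from the loader to confirm the shapes (a quick sanity check, not part of the original post):

# one batch: images of shape (batchsize, 1, 28, 28) and labels of shape (batchsize,)
images, labels = next(iter(loader))
print(images.shape, images.dtype)  # torch.Size([3, 1, 28, 28]) torch.float32
print(labels.shape, labels.dtype)  # torch.Size([3]) torch.int64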

class L5_NET(nn.Module):
    def __init__(self):
        super(L5_NET, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
        self.conv2 = nn.Conv2d(20, 30, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(30 * 4 * 4, 50)
        self.fc2 = nn.Linear(50, 2)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 30 * 4 * 4)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=0.8, training=self.training)
        x = self.fc2(x)
        # return log-probabilities so that F.nll_loss in train() is the correct loss
        return F.log_softmax(x, dim=1)

model = L5_NET()    
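
The fc1 input size of 30*4*4 follows from the feature-map sizes: 28 -> 24 after the 5x5 conv1, -> 12 after 2x2 max-pooling, -> 8 after the 5x5 conv2, -> 4 after the second pooling, with 30 channels. As a quick check (not part of the original post), a dummy batch can be pushed through the model:

# shape check with a dummy batch of one 1x28x28 image
dummy = torch.zeros(1, 1, 28, 28)
print(model(dummy).shape)  # torch.Size([1, 2]): log-probabilities for 2 classes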

cuda = False
train_epoch = 20
train_lr = 0.01
train_momentum = 0.5

if cuda:
    model = model.cuda()

optimizer = optim.SGD(model.parameters(), lr=train_lr, momentum=train_momentum)
# nn.CrossEntropyLoss() would only be needed if forward() returned raw logits;
# here forward() returns log-probabilities, so F.nll_loss is used in train()
def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(loader):
        if cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        # clear gradients accumulated from the previous step
        optimizer.zero_grad()
        # forward pass
        output = model(data)
        # compute the loss on this batch
        loss = F.nll_loss(output, target)
        # backpropagate to compute the gradients
        loss.backward()
        # SGD parameter update
        optimizer.step()
        # print progress
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx / len(loader), loss.item()))
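
The post defines train() but never calls it; a minimal driver loop over the configured number of epochs (filled in here as the intended usage) would be:

# run training for train_epoch epochs
for epoch in range(1, train_epoch + 1):
    train(epoch)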

While running this I hit the error Assertion `cur_target >= 0 && cur_target < n_classes' failed at loss = F.nll_loss(output, target). The cause is that my dataset's class labels start at 1, while nll_loss expects labels in the range [0, n_classes); so either the number of output classes has to be increased by 1, or the labels have to be shifted so they start at 0.
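
For example (a sketch of the two possible fixes, not code from the original post), the labels can be shifted to 0-based where they are converted to a tensor, or the classifier head can be widened:

# option 1: shift the 1-based folder labels into the 0-based range nll_loss expects
data_y = torch.from_numpy(np.asarray(data_y) - 1).long()

# option 2: keep 1-based labels and add one extra (unused) output class instead
# self.fc2 = nn.Linear(50, 3)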

Reposted from blog.csdn.net/lucky_kai/article/details/82701643