小刘总——Deep Neural Networks

1. Inspect the data
2. Training-set performance
3. Test-set performance
4. The overfitting problem
5. Hyperparameter tuning (training / validation / test sets)
6. Model ensembling

import matplotlib.pyplot as plt
import numpy as np

import torch
import torchvision
import torchvision.transforms as transforms

import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# How to use the GPU
data = data.cuda()
model = model.cuda()
# GPU -> CPU (e.g. before converting to numpy)
data.cpu().numpy()
# Pick a device explicitly
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
data = data.to(device)
# Multiple GPUs
device_ids = range(torch.cuda.device_count())  # e.g. [0, 1, 2, 3]
model = torch.nn.DataParallel(model, device_ids=device_ids)
model = model.to(device)
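One caveat with DataParallel: the wrapped network lives under model.module, so unwrap it before saving if you want a checkpoint that loads into a single-GPU model. A small sketch:

# unwrap DataParallel before saving so the state_dict keys have no "module." prefix
if isinstance(model, torch.nn.DataParallel):
    torch.save(model.module.state_dict(), 'parameter.pkl')
else:
    torch.save(model.state_dict(), 'parameter.pkl')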


# Loading data
# Arrange the data as a list (or array) first:
#   data  (list) -> [data1, data2, ...]
#   label (list) -> [label1, ...], where each label -> [box1, box2, box3]
data = torch.Tensor(data)
label = torch.Tensor(label)
dataset = torch.utils.data.TensorDataset(data, label)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
                                         num_workers=0, pin_memory=False)
for data, label in dataloader:
    for box in label:
        ...  # process each box here
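When the samples do not pack neatly into a single tensor (for example, a variable number of boxes per image), a custom Dataset subclass is the usual alternative. A minimal sketch; the class and field names here are only illustrative:

class BoxDataset(torch.utils.data.Dataset):
    """Hypothetical dataset: one image tensor plus its list of boxes per sample."""
    def __init__(self, images, labels):
        self.images = images   # list of image tensors
        self.labels = labels   # list of [box1, box2, ...] per image

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        return self.images[idx], self.labels[idx]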

# Model
class Net(nn.Module):
    def __init__(self,num_fc1):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 4 * 4, num_fc1)
        self.fc2 = nn.Linear(num_fc1, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 4 * 4)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
    # What about the backward pass? It is generated automatically by autograd from forward().

net = Net(num_fc1=120)  # __init__ takes num_fc1, so an argument is required
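A quick sanity check is to push a dummy batch through the network; with 1-channel 28x28 inputs (FashionMNIST-sized), the flattened feature map is exactly 16 * 4 * 4, so the forward pass runs:

dummy = torch.randn(2, 1, 28, 28)  # a fake batch of two 28x28 grayscale images
out = net(dummy)
print(out.shape)                   # torch.Size([2, 10])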



# Weight initialization
# For sigmoid/tanh activations: Xavier initialization
torch.nn.init.xavier_uniform_(tensor, gain=1)
torch.nn.init.xavier_normal_(tensor, gain=1)
# For ReLU activations: Kaiming (He) initialization
torch.nn.init.kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='relu')
torch.nn.init.kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='relu')
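These init functions act on a single tensor. To initialize every layer of a network, the usual pattern is to pass a small function to Module.apply; a sketch using Kaiming init for the conv and linear layers:

def init_weights(m):
    # initialize every Conv2d / Linear layer in the network
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
        if m.bias is not None:
            nn.init.zeros_(m.bias)

net.apply(init_weights)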


# Optimizer
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# Adjust the learning rate manually during training (exponential decay here)
for param_group in optimizer.param_groups:
    param_group['lr'] = lr * np.exp(-epoch)
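PyTorch also provides schedulers that handle this bookkeeping; a sketch with ExponentialLR, which multiplies the learning rate by gamma after every step():

scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
for epoch in range(10):
    # ... train for one epoch ...
    scheduler.step()  # lr <- lr * gamma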


# Saving a model
# 1. Save only the parameters (state_dict)
# save
torch.save(model.state_dict(), 'parameter.pkl')
# load: rebuild the model first, then load the weights
model = Model(...)
model.load_state_dict(torch.load('parameter.pkl'))
# 2. Save the whole network
# save
torch.save(model, 'model.pkl')
# load
model = torch.load('model.pkl')
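To resume training (not just run inference), it is common to also checkpoint the optimizer state and the epoch; a sketch, with 'checkpoint.pkl' chosen here as an example file name:

# save a full training checkpoint
torch.save({'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict()},
           'checkpoint.pkl')

# load and resume
checkpoint = torch.load('checkpoint.pkl')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']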


# Box label format: [[x, y, w, h, c], [x1, y1, w1, h1, c1], ...]


# Print the network architecture
print(net)



# Print the parameter tensors (layer name -> weight tensor)
print(net.state_dict())
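The same parameter iterator can be used to count trainable parameters, for example:

num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print('trainable parameters:', num_params)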


# Controlling PyTorch randomness (for reproducibility)
import random

torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
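Seeding alone does not make cuDNN convolutions deterministic; if exact reproducibility across runs matters, these two flags are usually set as well:

torch.backends.cudnn.deterministic = True  # force deterministic conv algorithms
torch.backends.cudnn.benchmark = False     # disable the non-deterministic auto-tuner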




# Hyperparameter tuning
# Ray Tune
from ray import tune
from ray.tune.schedulers import ASHAScheduler

def train_mnist(config):
    train_loader, test_loader = get_data_loaders()
    model = Net(config["fc1"])                  # the Net defined above takes only num_fc1
    optimizer = optim.SGD(model.parameters(), lr=config["lr"])
    for i in range(10):
        train(model, optimizer, train_loader)
        acc = test(model, test_loader)
        tune.track.log(mean_accuracy=acc)       # newer Ray versions use tune.report(...)

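train_mnist above relies on get_data_loaders, train and test helpers that are not defined in this post. A hypothetical sketch of what they might look like, reusing the FashionMNIST transforms from later in this post:

# Hypothetical helpers assumed by train_mnist (not part of the original post).
def get_data_loaders():
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5,), (0.5,))])
    train_set = torchvision.datasets.FashionMNIST('./data', train=True,
                                                  download=True, transform=transform)
    test_set = torchvision.datasets.FashionMNIST('./data', train=False,
                                                 download=True, transform=transform)
    return (torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True),
            torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=False))

def train(model, optimizer, loader):
    model.train()
    criterion = nn.CrossEntropyLoss()
    for inputs, targets in loader:
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()

def test(model, loader):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, targets in loader:
            preds = model(inputs).argmax(dim=1)
            correct += (preds == targets).sum().item()
            total += targets.size(0)
    return correct / total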
analysis = tune.run(
    train_mnist,
    num_samples=30,
    scheduler=ASHAScheduler(metric="mean_accuracy", mode="max"),
    resources_per_trial={"cpu": 1, "gpu": 0.1},
    config={"lr": tune.grid_search([0.001, 0.01, 0.1]),   # or tune.uniform(0.001, 0.1)
            "fc1": tune.grid_search([64, 120, 256])}      # "fc1" values are illustrative
    )

print("Best config: ", analysis.get_best_config(metric="mean_accuracy", mode="max"))




# Visualization with TensorBoard
# transforms
transform = transforms.Compose(
    [transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))])

# datasets
trainset = torchvision.datasets.FashionMNIST('./data',
    download=True,
    train=True,
    transform=transform)
testset = torchvision.datasets.FashionMNIST('./data',
    download=True,
    train=False,
    transform=transform)

# dataloaders
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                        shuffle=True, num_workers=2)


testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                        shuffle=False, num_workers=2)

# constant for classes
classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
        'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot')

# helper function to show an image
# (used in the `plot_classes_preds` function below)
def matplotlib_imshow(img, one_channel=False):
    if one_channel:
        img = img.mean(dim=0)
    img = img / 2 + 0.5     # unnormalize
    npimg = img.numpy()
    if one_channel:
        plt.imshow(npimg, cmap="Greys")
    else:
        plt.imshow(np.transpose(npimg, (1, 2, 0)))



class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 4 * 4)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


net = Net()




criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)


from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter('fashion_mnist_experiment_1')


# Grab a random batch of training images
dataiter = iter(trainloader)
images, labels = next(dataiter)
img_grid = torchvision.utils.make_grid(images)
# show the images
matplotlib_imshow(img_grid, one_channel=True)
# write them to the SummaryWriter
writer.add_image('four_fashion_mnist_images', img_grid)



# Launch TensorBoard from the command line:
tensorboard --logdir=fashion_mnist_experiment_1




# Visualize the network graph in TensorBoard
writer.add_graph(net, images)
writer.close()




def images_to_probs(net, images):
    output = net(images)
    # convert output probabilities to predicted class
    _, preds_tensor = torch.max(output, 1)
    preds = np.squeeze(preds_tensor.numpy())
    return preds, [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]


def plot_classes_preds(net, images, labels):
    preds, probs = images_to_probs(net, images)
    # plot the images in the batch, along with predicted and true labels
    fig = plt.figure(figsize=(12, 48))
    for idx in np.arange(4):
        ax = fig.add_subplot(1, 4, idx+1, xticks=[], yticks=[])
        matplotlib_imshow(images[idx], one_channel=True)
        ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
            classes[preds[idx]],
            probs[idx] * 100.0,
            classes[labels[idx]]),
                    color=("green" if preds[idx]==labels[idx].item() else "red"))
    return fig






# Training loop
running_loss = 0.0
for epoch in range(1):  
    for i, data in enumerate(trainloader, 0):

        inputs, labels = data
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 1000 == 999:    
            # log the running loss averaged over the last 1000 mini-batches
            writer.add_scalar('training loss',
                            running_loss / 1000,
                            epoch * len(trainloader) + i)

            # log a matplotlib Figure showing the model's predictions on this mini-batch
            writer.add_figure('predictions vs. actuals',
                            plot_classes_preds(net, inputs, labels),
                            global_step=epoch * len(trainloader) + i)
            running_loss = 0.0
print('Finished Training')
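After training, the same SummaryWriter can log evaluation results too; for example, a per-class precision-recall curve on the test set (a sketch along the lines of the official PyTorch TensorBoard tutorial):

# collect the network's probabilities and the true labels over the test set
class_probs = []
class_labels = []
with torch.no_grad():
    for images, labels in testloader:
        output = net(images)
        class_probs.append(F.softmax(output, dim=1))
        class_labels.append(labels)

test_probs = torch.cat(class_probs)
test_labels = torch.cat(class_labels)

# one precision-recall curve per class
for class_index in range(10):
    writer.add_pr_curve(classes[class_index],
                        test_labels == class_index,   # binary ground truth for this class
                        test_probs[:, class_index])   # predicted probability for this class
writer.close()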

Reposted from blog.csdn.net/weixin_44659309/article/details/107797028