0011 - PyTorch Basics - Data Parallelism

'''
Key statements:
model.cuda()
tensor.cuda()
# with multiple GPUs
nn.DataParallel(model)
'''
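
'''
Since PyTorch 0.4 the same steps are usually written device-agnostically
with torch.device and .to(device). A minimal sketch, not from the original
post; the variable names here are illustrative:

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Model(input_size, output_size)
if torch.cuda.device_count() > 1:
    net = nn.DataParallel(net)
net = net.to(device)       # move parameters to the chosen device
batch = batch.to(device)   # move each input batch the same way
'''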

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset

# Parameters
input_size = 5
output_size = 2

batch_size = 30
data_size = 100

# Dataset that just serves random tensors
class RandomDataset(Dataset):
    def __init__(self, size, length):
        self.len = length
        self.data = torch.randn(length, size)

    def __len__(self):
        return self.len

    def __getitem__(self, item):
        return self.data[item]


random_loader = DataLoader(dataset=RandomDataset(input_size, data_size),
                           batch_size=batch_size, shuffle=True)
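
# Quick sanity check (not in the original post): peek at one batch to
# confirm its shape; with batch_size = 30 and input_size = 5 this prints
# torch.Size([30, 5]).
first_batch = next(iter(random_loader))
print("sanity check, first batch:", first_batch.size())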

# Model definition
class Model(nn.Module):
    # Our model

    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        output = self.fc(input)
        print("  In Model: input size", input.size(),
              "output size", output.size())

        return output
'''
First we instantiate the model and check how many GPUs are available.
If there is more than one, we wrap the model with nn.DataParallel and
then move it onto the GPUs with model.cuda(). With a single GPU,
calling model.cuda() by itself is enough.
'''
model = Model(input_size, output_size)
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    model = nn.DataParallel(model)

if torch.cuda.is_available():
    model.cuda()
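
'''
Note (not in the original post): nn.DataParallel wraps the network, and the
underlying module stays reachable as model.module. This matters when saving
weights; a hedged sketch, where "model.pt" is an illustrative path:

if isinstance(model, nn.DataParallel):
    torch.save(model.module.state_dict(), "model.pt")
else:
    torch.save(model.state_dict(), "model.pt")
'''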

for data in random_loader:
    if torch.cuda.is_available():
        input_var = data.cuda()
    else:
        input_var = data

    output = model(input_var)
    print("Outside: input size", input_var.size(),
          "output_size", output.size())

Reposted from blog.csdn.net/zhonglongshen/article/details/112727133