Classification with PyTorch

Complete code

# A simple classification example
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.optim as optim

# Generate two clusters of toy data
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)    # 100 points centered around (2, 2)
y0 = torch.zeros(100)               # label 0 for the first cluster
x1 = torch.normal(-2 * n_data, 1)   # 100 points centered around (-2, -2)
y1 = torch.ones(100)                # label 1 for the second cluster
# x holds the features, y holds the labels
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), 0).type(torch.LongTensor)


# Plot the raw data (uncomment to view)
#plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, )
#plt.show()


# Define the network
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)   # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)   # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))   # hidden layer with ReLU activation
        x = self.predict(x)          # output layer returns raw scores (logits)
        return x
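
# An equivalent model could also be built with torch.nn.Sequential; this is only an
# illustrative alternative (net_seq is not used by the rest of the script, which keeps
# the Net class above):
#   net_seq = torch.nn.Sequential(
#       torch.nn.Linear(2, 10),
#       torch.nn.ReLU(),
#       torch.nn.Linear(10, 2),
#   )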
# Two input features (the two coordinates of a point) and two output classes, 0 and 1
net = Net(2, 10, 2)
#print(net)  
# An output close to [0, 1] means the sample is class 1, while [1, 0] means class 0; this is binary classification.
# With three classes, [0, 1, 0] means class 1, [1, 0, 0] means class 0, and [0, 0, 1] means class 2.
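# For instance, a one-row logit tensor (made-up numbers, purely for illustration) is mapped
# to a class index by taking the argmax over the class dimension:
#   example_logits = torch.tensor([[0.2, 1.7]])  # hypothetical scores for class 0 and class 1
#   example_logits.argmax(dim=1)                 # -> tensor([1]), i.e. class 1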


# Optimizer and loss function
optimizer = optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()
# CrossEntropyLoss takes the raw logits and applies log-softmax internally,
# so the network itself does not need a softmax layer
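# As a small sanity check (using the same `out` and `y` that appear in the training loop
# below), CrossEntropyLoss is equivalent to log-softmax followed by the negative
# log-likelihood loss:
#   F.nll_loss(F.log_softmax(out, dim=1), y)  # same value as loss_func(out, y)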

# Interactive plotting
plt.ion()
#plt.show()

for t in range(100):
    out = net(x)
    loss = loss_func(out, y)  # compare predicted logits with the ground-truth labels

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # visualize the current predictions every other step
    if t % 2 == 0:
        plt.cla()
        prediction = torch.max(F.softmax(out, dim=1), 1)[1]  # class index with the highest probability
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        accuracy = (pred_y == target_y).sum() / 200  # 200 samples in total
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color':  'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()
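
After training, the net can classify new points. Here is a minimal inference sketch, assuming the script above has already run; the test coordinates below are made up for illustration:

new_points = torch.tensor([[2.0, 2.0], [-2.0, -2.0]])  # hypothetical points near the two cluster centers
with torch.no_grad():                                   # no gradients are needed at inference time
    logits = net(new_points)
    predicted_classes = logits.argmax(dim=1)
print(predicted_classes)  # should print tensor([0, 1]) once the network has converged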

Reposted from www.cnblogs.com/loyolh/p/12290935.html