PyTorch Basics, Part 2

The loss function and gradient-update rule for linear regression are as follows:
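In the notation of the code below (m samples, design matrix X whose first column holds the inputs and second column is all ones, parameters θ), the cost and update computed by get_gradient and gradient_descent are:

$$J(\theta)=\frac{1}{2m}\sum_{i=1}^{m}\bigl(x^{(i)}\theta-y^{(i)}\bigr)^{2},\qquad \nabla J(\theta)=\frac{1}{m}X^{\top}(X\theta-y),\qquad \theta\leftarrow\theta-\alpha\,\nabla J(\theta)$$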

1. Linear regression with gradient descent in NumPy

import numpy as np
import matplotlib.pyplot as plt
def get_fake_data(batch_size=8):
    ''' Generate random data: y = 2x + 3, plus some noise '''
    x = np.random.rand(batch_size, 1) * 5
    y = x * 2 + 3 + np.random.rand(batch_size, 1)*2
    return x, y

def get_gradient(theta, x, y):
    """Return the gradient and cost of least-squares linear regression."""
    m = x.shape[0]
    Y_estimate = np.dot(x, theta)  # predictions, shape (m,)
    assert (Y_estimate.shape == (m,))
    error = Y_estimate - y
    assert (error.shape == (m,))
    cost = 1.0 / (2 * m) * np.sum(error ** 2)
    # grad = (1.0 / m) * np.dot(x.T, error).reshape(-1)  # equivalent form, shape (2,)
    grad = (1.0 / m) * np.dot(error, x)  # shape (2,)
    return grad, cost
def gradient_descent(x, y, iterations, alpha):
    """Run batch gradient descent from a random initial theta."""
    theta = np.random.randn(2)
    costs = []
    for i in range(iterations):
        grad, cost = get_gradient(theta, x, y)
        new_theta = theta - alpha * grad
        if i % 100 == 0:
            print('{} iterations cost={}'.format(i, cost))
            costs.append(cost)
        theta = new_theta
    return costs, theta

def vis_data():
    # Visualize the generated x-y distribution
    x, y = get_fake_data(batch_size=16)
    print(x.shape)
    print(y.shape)
    plt.scatter(np.squeeze(x), np.squeeze(y))
    plt.show()
if __name__=='__main__':
    batch_size=32
    data_x, data_y = get_fake_data(batch_size=batch_size)
    # Append a column of ones; its coefficient in theta acts as the bias b
    data_x = np.hstack((data_x, np.ones_like(data_x)))  # (m, 2)
    print(data_x)
    print(data_x.shape)

    costs,theta=gradient_descent(data_x,np.squeeze(data_y),iterations=50000,alpha=0.002)
    print(data_y.shape)

    # print(theta)
    y_predict = np.dot(data_x, theta)  # equivalently theta[0]*data_x[:,0] + theta[1]
    print(y_predict.shape)
    plt.figure()
    # scatter plot of the samples
    print(data_x[:2])
    plt.scatter(data_x[:,0],np.squeeze(data_y),c='red')
    plt.plot(data_x[:,0],y_predict)
    plt.show()

The red points are the samples; the blue line is the fitted line.
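As a quick sanity check (not part of the original script), the result of gradient descent can be compared against the closed-form least-squares solution; np.linalg.lstsq solves the same problem directly. Since the uniform noise in get_fake_data has mean 1, theta should come out near [2, 4]:

theta_ls, *_ = np.linalg.lstsq(data_x, np.squeeze(data_y), rcond=None)
print('gradient descent:', theta)     # expected to be close to [2, 4]
print('least squares:   ', theta_ls)  # closed-form reference fit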

2. Linear regression with gradient descent in PyTorch

import numpy as np
import matplotlib.pyplot as plt
import torch as t

device=t.device('cpu')

def get_fake_data(batch_size=8):
    ''' Generate random data: y = 2x + 3, plus some noise '''
    x = t.rand(batch_size, 1, device=device) * 5
    y = x * 2 + 3 + t.rand(batch_size, 1, device=device) * 2
    return x, y

def vis_data():
    # Visualize the generated x-y distribution
    x, y = get_fake_data(batch_size=16)
    print(x.shape)
    print(y.shape)
    plt.scatter(np.squeeze(x), np.squeeze(y))
    plt.show()
if __name__=='__main__':
    # vis_data()

    m=batch_size=32
    data_x, data_y = get_fake_data(batch_size=batch_size)
    # Append a column of ones; its coefficient in theta acts as the bias b
    data_x = t.cat([data_x, t.ones_like(data_x)], dim=1)  # (m, 2)
    print(data_x.shape)

    theta = t.randn((2, 1),requires_grad=True)
    iterations=500
    lr = 0.005  # learning rate
    losses=[]
    for i in range(iterations):
        # forward: compute the loss
        y_pred = data_x.mm(theta)                     # (m, 1)
        loss = 1 / (2 * m) * (y_pred - data_y) ** 2   # per-sample loss, (m, 1)
        loss = loss.sum()                             # scalar
        losses.append(loss.item())

        # backward: autograd computes the gradient of loss w.r.t. theta
        loss.backward()

        # update parameters
        theta.data.sub_(lr * theta.grad.data)

        # zero the gradient so it does not accumulate across iterations
        theta.grad.data.zero_()
    print('losses=',losses)
    # plot the samples and the fitted line
    plt.scatter(data_x[:, 0].numpy(), data_y.numpy().squeeze(), c='red')
    y_predict = data_x.mm(theta)
    print('y_predict.shape', y_predict.shape)
    plt.plot(data_x[:, 0].numpy(), y_predict.detach().numpy())  # predicted line
    plt.show()
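For comparison, here is a minimal sketch of the same fit using PyTorch's built-in modules instead of a hand-rolled update. The names model and opt are illustrative; nn.Linear learns its own bias, so no column of ones is needed:

from torch import nn, optim

model = nn.Linear(1, 1)                 # weight should approach 2, bias ~4
opt = optim.SGD(model.parameters(), lr=0.005)
loss_fn = nn.MSELoss()

x, y = get_fake_data(batch_size=32)     # raw x, shape (32, 1)
for _ in range(500):
    opt.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    opt.step()
print(model.weight.item(), model.bias.item())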

3. Training on the CIFAR-10 dataset

import torch as t
import torchvision as tv
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
import os
from PIL import Image
import matplotlib.pyplot as plt
import cv2

show = ToPILImage()  # converts a Tensor to a PIL Image for easy visualization

# Define the preprocessing pipeline
transform = transforms.Compose([
    transforms.ToTensor(),  # convert to Tensor, scaled to [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # normalize to [-1, 1]
])

path = './data'
if not os.path.exists(path):
    os.mkdir(path)
# training set
trainset = tv.datasets.CIFAR10(
    root=path,
    train=True,
    download=True,
    transform=transform)

trainloader = t.utils.data.DataLoader(
    trainset,
    batch_size=4,
    shuffle=True,
    num_workers=2)

# test set
testset = tv.datasets.CIFAR10(
    path,
    train=False,
    download=True,
    transform=transform)

testloader = t.utils.data.DataLoader(
    testset,
    batch_size=4,
    shuffle=False,
    num_workers=2)

classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

(data, label) = trainset[100]
print(data.shape)
print(classes[label])


def vis_data_cv2():
    new_data = data.numpy()
    new_data = (new_data * 0.5 + 0.5) * 255
    print(new_data.shape)
    new_data = new_data.transpose((1, 2, 0))
    new_data = cv2.resize(new_data, (100, 100))
    new_data = cv2.cvtColor(new_data, cv2.COLOR_RGB2BGR)
    print(new_data.shape)
    cv2.imwrite('1.jpg', new_data)


def vis_data_mutilpy():
    dataiter = iter(trainloader)
    images, labels = next(dataiter)  # returns a batch of 4 images and labels
    print(' '.join('%11s' % classes[labels[j]] for j in range(4)))
    img = show(tv.utils.make_grid((images + 1) / 2)).resize((400, 100))
    import numpy as np
    img = np.array(img)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    print(img.shape)
    cv2.imwrite('2.jpg', img)


import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(x.size()[0], -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # return raw logits: CrossEntropyLoss applies log_softmax internally
        return x
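# Shape check for fc1's input size: CIFAR-10 images are 3x32x32;
# conv1 (5x5, no padding) -> 6x28x28; 2x2 max-pool -> 6x14x14;
# conv2 (5x5) -> 16x10x10; 2x2 max-pool -> 16x5x5, i.e. 16*5*5 = 400 features.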

net = Net()
print(net)
for name, parameters in net.named_parameters():
    print(name, ':', parameters.size())

params = list(net.parameters())
print(len(params))
print('params=', params)


from torch import optim
criterion = nn.CrossEntropyLoss()  # cross-entropy loss (expects raw logits)
optimizer = optim.Adam(net.parameters(), lr=0.001)

t.set_num_threads(8)

losses=[]
for epoch in range(1):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        if i < 1000:  # train on the first 1000 batches only, to keep the demo fast
            # input data
            inputs, labels = data
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            # print('loss=', loss)
            loss.backward()
            # update the parameters
            optimizer.step()
            # logging: loss is a scalar tensor; read it with loss.item(), not loss[0]
            losses.append(loss.item())
plt.plot(losses)
plt.show()
print('Finished Training')
correct = 0  # number of correctly classified images
total = 0  # total number of images
# no gradients are needed at test time; disabling autograd saves time and memory
with t.no_grad():
    for i, data in enumerate(testloader):
        images, labels = data
        outputs = net(images)
        _, predicted = t.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum()
print('Accuracy on the 10000 test images: %d %%' % (100 * correct.item() / total))
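Once training finishes, the learned weights can be persisted with PyTorch's standard state_dict mechanism (the file name cifar10_net.pth is illustrative):

# save the trained weights
t.save(net.state_dict(), 'cifar10_net.pth')
# restore them later into a freshly constructed Net
net2 = Net()
net2.load_state_dict(t.load('cifar10_net.pth'))
net2.eval()  # switch to evaluation mode before inference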


Reposted from blog.csdn.net/fanzonghao/article/details/90140745