PyTorch 使用 RNN 输入sin(x)值序列,预测最后一个sin(x)对应的cos(x)

版权声明:转载请注明出处及原文地址。 https://blog.csdn.net/zl1085372438/article/details/86641414

    每一个sin(x)可能对应两个cos(x)的数值,不是一一对应关系,这边借助于RNN,输入sin(x)序列,预测sin(x)序列中最后一个值,对应的cos(x),借助的是RNN的记忆性。

这边要注意的是各个Tensor的维度的处理,熟悉torch.stack,torch.cat,x.squeeze(index), x.unsqueeze(index) 函数的使用

import torch
import torch.nn as nn
import torchvision
from torchvision import datasets,transforms
from torch.autograd import Variable
from matplotlib import pyplot as plt
import numpy as np



class RNN(nn.Module):
    """Single-layer RNN (hidden_size=1) that consumes a sin(x) sequence and
    returns the final hidden state, used as the prediction of cos(x) at the
    last time step."""

    def __init__(self):
        super().__init__()
        # batch_first=True -> expected input shape (batch, seq_len, features)
        self.rnn = nn.RNN(
            input_size=1,
            hidden_size=1,
            num_layers=1,
            batch_first=True,
        )

    def forward(self, input):
        # hn shape: (num_layers * num_directions, batch, hidden_size) = (1, B, 1)
        _, hn = self.rnn(input, None)
        # Drop the layer dimension -> (batch, hidden_size) = (B, 1)
        return hn.squeeze(0)





def get_One_train_data():
    """Sample one training example.

    Returns a 10-point sin(x) sequence over a random interval of length pi
    (start drawn uniformly from [-5, 5)), paired with cos(x) at the final
    point of that interval.
    """
    offset = (np.random.rand() - 0.5) * 10
    xs = np.linspace(offset, offset + np.pi, num=10)
    return np.sin(xs), np.cos(xs)[-1]


def get_train_data(batch_size = 64):
    """Build one mini-batch of sin->cos training examples.

    Returns:
        data:   FloatTensor of shape (batch_size, 10, 1), sin(x) sequences
                (trailing feature dim added for the batch_first RNN).
        labels: FloatTensor of shape (batch_size, 1), cos(x) of the last point.
    """
    batch_data = []
    batch_labels = []
    for _ in range(batch_size):
        data, label = get_One_train_data()
        data = torch.FloatTensor(data).unsqueeze(-1)   # (10,) -> (10, 1)
        label = torch.FloatTensor(label.reshape(1))    # numpy scalar -> (1,)
        batch_data.append(data)
        batch_labels.append(label)
    # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4):
    # plain tensors carry autograd state, so return them directly.
    return torch.stack(batch_data, dim=0), torch.stack(batch_labels, dim=0)

# Instantiate the RNN model and show its layer structure.
model = RNN()
print(model)

# Regression objective: mean squared error between predicted and true cos(x).
loss_f = nn.MSELoss()
# NOTE(review): "optimzer" is a typo for "optimizer"; kept as-is because the
# training loop below refers to this exact name.
optimzer = torch.optim.SGD(model.parameters(),lr=1e-1)

def get_ACC():
    """Print predicted vs. true cos(x) for a small freshly sampled batch."""
    bs = 5
    batch_data, batch_labels = get_train_data(batch_size=bs)
    # Inference only: switch to eval mode and skip autograd bookkeeping
    # (the other get_ACC variants in this file already toggle eval/train).
    model.eval()
    with torch.no_grad():
        out = model(batch_data)
    model.train()
    for i in range(bs):
        print('true cos:{} , pred cos:{}'.format(batch_labels[i,0],out[i,0]))



# Train for 10k iterations, sampling a fresh random batch each step.
for cnt in range(10000):
    batch_data,batch_labels = get_train_data()
    out = model(batch_data)
    loss = loss_f(out,batch_labels)
    optimzer.zero_grad()
    loss.backward()
    optimzer.step()
    if cnt % 100 == 0:
        # loss.item() extracts the Python float directly; the old
        # `.data` attribute is deprecated in modern PyTorch.
        print_loss = loss.item()
        print('cnt:{},loss:{}'.format(cnt,print_loss))


# NOTE(review): saving the whole module pickles the class definition;
# saving model.state_dict() is the recommended, more portable format.
torch.save(model,'sincosModel')

get_ACC()

这个问题当然也可以用全连接神经网络解决:输入和上面差不多,也是多个 sin(x),预测最后一个 sin(x) 对应的 cos(x),可以做到在速度更快的情况下,精度还比上面的 RNN 高。

import torch
import torch.nn as nn
import torchvision
from torchvision import datasets,transforms
from torch.autograd import Variable
from matplotlib import pyplot as plt
import numpy as np



class NN(nn.Module):
    """Fully connected network mapping a 10-point sin(x) sequence to a
    single cos(x) prediction for the last point."""

    def __init__(self):
        super().__init__()
        layers = [
            nn.Linear(10, 10),
            nn.BatchNorm1d(10),
            nn.ReLU(),
            nn.Linear(10, 1),
        ]
        self.nn = nn.Sequential(*layers)

    def forward(self, input):
        # (batch, 10) -> (batch, 1)
        return self.nn(input)





def get_One_train_data():
    """Sample one training example.

    Returns a 10-point sin(x) sequence over a random interval of length pi
    (start drawn uniformly from [-5, 5)), paired with cos(x) at the final
    point of that interval.
    """
    offset = (np.random.rand() - 0.5) * 10
    xs = np.linspace(offset, offset + np.pi, num=10)
    return np.sin(xs), np.cos(xs)[-1]


def get_train_data(batch_size = 64):
    """Build one mini-batch for the fully connected model.

    Returns:
        data:   FloatTensor of shape (batch_size, 10), sin(x) sequences.
        labels: FloatTensor of shape (batch_size, 1), cos(x) of the last point.
    """
    batch_data = []
    batch_labels = []
    for _ in range(batch_size):
        data, label = get_One_train_data()
        data = torch.FloatTensor(data)                 # (10,)
        label = torch.FloatTensor(label.reshape(1))    # numpy scalar -> (1,)
        batch_data.append(data)
        batch_labels.append(label)
    # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4):
    # plain tensors carry autograd state, so return them directly.
    return torch.stack(batch_data, dim=0), torch.stack(batch_labels, dim=0)

# Instantiate the fully connected model and show its layer structure.
model = NN()
print(model)

# Regression objective: mean squared error between predicted and true cos(x).
loss_f = nn.MSELoss()
# NOTE(review): "optimzer" is a typo for "optimizer"; kept as-is because the
# training loop below refers to this exact name.
optimzer = torch.optim.SGD(model.parameters(),lr=1e-1)

def get_ACC():
    """Print predicted vs. true cos(x) for a small freshly sampled batch."""
    model.eval()  # BatchNorm must use running stats during evaluation
    bs = 5
    batch_data,batch_labels = get_train_data(batch_size = bs)
    # Inference only: skip autograd bookkeeping.
    with torch.no_grad():
        out = model(batch_data)
    for i in range(bs):
        print('true cos:{} , pred cos:{}'.format(batch_labels[i,0],out[i,0]))
    model.train()



# Train for 10k iterations, sampling a fresh random batch each step.
for cnt in range(10000):
    batch_data,batch_labels = get_train_data()
    out = model(batch_data)
    loss = loss_f(out,batch_labels)
    optimzer.zero_grad()
    loss.backward()
    optimzer.step()
    if cnt % 100 == 0:
        # loss.item() extracts the Python float directly; the old
        # `.data` attribute is deprecated in modern PyTorch.
        print_loss = loss.item()
        print('cnt:{},loss:{}'.format(cnt,print_loss))


# NOTE(review): saving the whole module pickles the class definition;
# saving model.state_dict() is the recommended, more portable format.
torch.save(model,'sincosModel')

get_ACC()

但是如果是一个sin(x),一个预测cos(x),这样的全连接神经网络就收敛不了啦!如下代码收敛不了:

import torch
import torch.nn as nn
import torchvision
from torchvision import datasets,transforms
from torch.autograd import Variable
from matplotlib import pyplot as plt
import numpy as np



class NN(nn.Module):
    """MLP that attempts to predict cos(x) from a single sin(x) value.

    As the surrounding text notes, this cannot converge: a single sin(x)
    value maps to two possible cos(x) values, so the target is ambiguous.
    """

    def __init__(self):
        super().__init__()
        layers = [
            nn.Linear(1, 20),
            nn.BatchNorm1d(20),
            nn.ReLU(),
            nn.Linear(20, 20),
            nn.BatchNorm1d(20),
            nn.ReLU(),
            nn.Linear(20, 1),
        ]
        self.nn = nn.Sequential(*layers)

    def forward(self, input):
        # (batch, 1) -> (batch, 1)
        return self.nn(input)





def get_One_train_data():
    """Sample one (sin(x), cos(x)) pair at a single random point.

    The point is the endpoint of a random length-pi interval, matching the
    sequence-based versions above, but only the last value is kept, so the
    returned values are scalars.
    """
    offset = (np.random.rand() - 0.5) * 10
    xs = np.linspace(offset, offset + np.pi, num=10)
    return np.sin(xs)[-1], np.cos(xs)[-1]


def get_train_data(batch_size = 64):
    """Build one mini-batch of single-point sin->cos examples.

    Returns:
        data:   FloatTensor of shape (batch_size, 1), sin(x) values.
        labels: FloatTensor of shape (batch_size, 1), cos(x) values.
    """
    batch_data = []
    batch_labels = []
    for _ in range(batch_size):
        data, label = get_One_train_data()
        data = torch.FloatTensor(data.reshape(1))      # numpy scalar -> (1,)
        label = torch.FloatTensor(label.reshape(1))    # numpy scalar -> (1,)
        batch_data.append(data)
        batch_labels.append(label)
    # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4):
    # plain tensors carry autograd state, so return them directly.
    return torch.stack(batch_data, dim=0), torch.stack(batch_labels, dim=0)

# Instantiate the single-input MLP and show its layer structure.
model = NN()
print(model)

# Regression objective: mean squared error between predicted and true cos(x).
loss_f = nn.MSELoss()
# NOTE(review): "optimzer" is a typo for "optimizer"; kept as-is because the
# training loop below refers to this exact name.
optimzer = torch.optim.SGD(model.parameters(),lr=1e-1)

def get_ACC():
    """Print predicted vs. true cos(x) for a small freshly sampled batch."""
    model.eval()  # BatchNorm must use running stats during evaluation
    bs = 5
    batch_data,batch_labels = get_train_data(batch_size = bs)
    # Inference only: skip autograd bookkeeping.
    with torch.no_grad():
        out = model(batch_data)
    for i in range(bs):
        print('true cos:{} , pred cos:{}'.format(batch_labels[i,0],out[i,0]))
    model.train()



# Train for 10k iterations, sampling a fresh random batch each step.
for cnt in range(10000):
    batch_data,batch_labels = get_train_data()
    out = model(batch_data)
    loss = loss_f(out,batch_labels)
    optimzer.zero_grad()
    loss.backward()
    optimzer.step()
    if cnt % 100 == 0:
        # loss.item() extracts the Python float directly; the old
        # `.data` attribute is deprecated in modern PyTorch.
        print_loss = loss.item()
        print('cnt:{},loss:{}'.format(cnt,print_loss))


# NOTE(review): saving the whole module pickles the class definition;
# saving model.state_dict() is the recommended, more portable format.
torch.save(model,'sincosModel')

get_ACC()

猜你喜欢

转载自blog.csdn.net/zl1085372438/article/details/86641414