WaveNet time series forecasting, TensorFlow version (source code and data included)

Using WaveNet to forecast a single (univariate) time series.

The training loss curve looks like this:

[Figure: training loss over epochs]

Model prediction results:

[Figure: predicted series vs. target series]

Source code and data

Link: https://pan.baidu.com/s/1fmSuMeJ81Cc54wFvROPEPQ
Extraction code: lqvd

A brief overview of how WaveNet works

At the heart of WaveNet are dilated causal convolution layers (Dilated Causal Convolutions), which let the model respect temporal ordering and capture long-range dependencies without blowing up its complexity. Here is a visualization of the structure from the DeepMind post:

[Figure: stack of dilated causal convolution layers, from the DeepMind post]
First, we slide a weight filter along the input sequence, applying it sequentially to (usually overlapping) regions of the series. But when we use a time series' history to predict its future, we have to be careful: as we build up the layers that ultimately connect input steps to outputs, we must make sure that an input cannot influence output steps that precede it in time. Otherwise we would be using the future to predict the past, and our model would be cheating!
To make sure we cannot cheat this way, we adjust the convolution design to explicitly forbid the future from influencing the past. In other words, we only allow an input to connect to output time steps at or after it, in a causal structure, as shown in the visualization below from the WaveNet paper. In practice, this causal 1-D structure is easy to achieve by shifting the output of a conventional convolution by the appropriate number of time steps.

[Figure: causal convolution connectivity, from the WaveNet paper]
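
To make the shifting trick concrete, here is a minimal NumPy sketch (my own illustration, not from the original post): left-padding the input by (kernel_size - 1) * dilation zeros before a plain convolution guarantees that no output step can see future inputs, which is effectively what Keras's Conv1D(padding='causal') does.

import numpy as np

def causal_conv1d(x, w, dilation=1):
    # dilated causal 1-D convolution: output[t] depends only on
    # x[t], x[t - dilation], x[t - 2*dilation], ... and never on the future
    k = len(w)
    pad = (k - 1) * dilation
    xp = np.concatenate([np.zeros(pad), x])  # left-pad with zeros
    return np.array([sum(w[j] * xp[t + pad - j * dilation] for j in range(k))
                     for t in range(len(x))])

x = np.arange(8, dtype=float)
print(causal_conv1d(x, w=np.array([1.0, 1.0]), dilation=2))
# [ 0.  1.  2.  4.  6.  8. 10. 12.]  i.e. output[t] = x[t] + x[t-2]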

Code walkthrough

Importing external packages

from keras.models import Model, load_model
from keras.layers import Input, Conv1D, Dense, Dropout, Lambda
from keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os

Set the number of steps to predict

pred_steps = 50

Data splitting

def preprocessing():
    # load the univariate series
    df = pd.read_csv('./data/train_1.csv', usecols=['value'])

    # hold out the last pred_steps points for validation
    val_pred_start = len(df) - pred_steps
    val_pred_end = len(df)

    # the pred_steps points just before the validation window are the training targets
    train_pred_start = val_pred_start - pred_steps - 1
    train_pred_end = val_pred_start - 1
    enc_length = train_pred_start

    train_enc_start = 0
    train_enc_end = train_enc_start + enc_length - 1

    # the validation encoder window is the training window shifted forward by pred_steps
    val_enc_start = train_enc_start + pred_steps
    val_enc_end = val_enc_start + enc_length - 1

    print('Train encoding:', train_enc_start, '-', train_enc_end)
    print('Train prediction:', train_pred_start, '-', train_pred_end, '\n')
    print('Val encoding:', val_enc_start, '-', val_enc_end)
    print('Val prediction:', val_pred_start, '-', val_pred_end)
    # flatten the (n, 1) DataFrame into a 1-D array
    series_array = np.array(df)
    series_array = series_array.reshape(len(series_array))
    return train_enc_start, train_enc_end, train_pred_start, train_pred_end, val_enc_start, val_enc_end, val_pred_start, val_pred_end, series_array
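
As a concrete illustration of the windows this produces (a hypothetical run, not output from the actual data): with a series of length 1000 and pred_steps = 50, the function would print:

# Hypothetical run with len(df) == 1000 and pred_steps == 50:
#   Train encoding:   0 - 898
#   Train prediction: 899 - 949
#   Val encoding:     50 - 948
#   Val prediction:   950 - 1000
# The validation windows are the training windows shifted forward by
# pred_steps, so the last pred_steps points are never seen during training.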

Data extraction, normalization, and denormalization

# slice the series between the given start and end positions
def get_time_block_series(series_array, start_date, end_date):
    return series_array[start_date:end_date]

# standardize the data; returns the standardized data, its mean, and its standard deviation
def transform_series_encode(data):
    data_mean = data.mean(axis=0).reshape(-1, 1)
    data_std = data.std(axis=0).reshape(-1, 1)
    epsilon = 1e-6  # prevent division by zero when std == 0
    data = (data - data_mean) / (data_std + epsilon)
    data = data.reshape((data.shape[0], data.shape[1], 1))  # (batch, timesteps, 1)
    return data, data_mean, data_std

# invert the standardization
def untransform_series_decode(data, data_mean, data_std):
    data_std = data_std[0][0]
    data_mean = data_mean[0][0]
    data = data * data_std + data_mean
    data = data.reshape(len(data))
    return data

# standardize data using precomputed statistics (used for the decoder targets)
def transform_series_decode(data, data_mean, data_std):
    epsilon = 1e-6  # prevent division by zero when std == 0
    data = (data - data_mean) / (data_std + epsilon)
    data = data.reshape((data.shape[0], data.shape[1], 1))  # (batch, timesteps, 1)
    return data
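
A quick round-trip sanity check (my addition; raw is a stand-in for a 1-D window as returned by get_time_block_series): encoding and then decoding should recover the original values up to the epsilon used in the standardization.

import numpy as np

raw = np.random.rand(200)                      # hypothetical 1-D series window
enc, mean, std = transform_series_encode(raw)  # enc has shape (1, 200, 1)
rec = untransform_series_decode(enc.reshape(-1), mean, std)
print(np.allclose(rec, raw, atol=1e-4))        # True (tiny error from epsilon)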

Model prediction

# autoregressive prediction: predict one step at a time and feed each
# prediction back into the history before predicting the next step
def predict_sequences(input_sequences, batch_size):
    history_sequences = input_sequences.copy()
    pred_sequences = np.zeros((history_sequences.shape[0], pred_steps, 1))  # (batch, pred_steps, 1)
    for i in range(pred_steps):
        # the next-step prediction is the last time step of the model output
        last_step_pred = model.predict(history_sequences, batch_size=batch_size)[:, -1, 0]
        pred_sequences[:, i, 0] = last_step_pred

        # append the new prediction to the history sequence
        history_sequences = np.concatenate([history_sequences,
                                            last_step_pred.reshape(-1, 1, 1)], axis=1)

    return pred_sequences
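
Note that predict_sequences relies on the global model built in the next section, and each iteration makes the history one step longer, so the i-th prediction is conditioned on the full real history plus the i predictions made so far. A hypothetical call looks like this (shapes only, under the same assumptions as the script below):

# encoder_input_data: normalized history with shape (batch, timesteps, 1)
# preds = predict_sequences(encoder_input_data, batch_size=2**8)
# preds.shape == (batch, pred_steps, 1)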

Prediction and plotting

# run the autoregressive prediction and plot it against the target
def predict_and_plot(encoder_input_data, pre_target_data, encode_series_mean,
                     encode_series_std, batch_size, enc_tail_len=50):
    encode_series = encoder_input_data
    pred_series = predict_sequences(encode_series, batch_size)

    # flatten everything to 1-D for plotting
    encode_series = encode_series.reshape(-1)
    pred_series = pred_series.reshape(-1)
    pre_target_data = pre_target_data.reshape(-1)

    # map the normalized encoder and prediction values back to the original
    # scale (pre_target_data is already on the original scale)
    encode_series = untransform_series_decode(encode_series, encode_series_mean, encode_series_std)
    pred_series = untransform_series_decode(pred_series, encode_series_mean, encode_series_std)

    plt.figure(figsize=(10, 6))
    plt.plot(range(enc_tail_len), encode_series[-enc_tail_len:], label='encoding series')
    plt.plot(range(enc_tail_len, enc_tail_len + len(pred_series)), pred_series,
             color='red', label='predict')
    plt.plot(range(enc_tail_len, enc_tail_len + len(pre_target_data)), pre_target_data,
             color='orange', label='target')

    plt.title('Encoder Series Tail of Length %d, Target Series, and Predictions' % enc_tail_len)
    plt.legend()
    plt.show()

Building the model

model_name = 'Wavenet'

if not os.path.exists('{}.h5'.format(model_name)):
    n_filters = 128
    filter_width = 5
    dilation_rates = [2**i for i in range(12)]  # 1, 2, 4, ..., 2048
    history_seq = Input(shape=(None, 1))
    x = history_seq

    # stack of dilated causal convolutions
    for dilation_rate in dilation_rates:
        x = Conv1D(filters=n_filters,
                   kernel_size=filter_width,
                   padding='causal',
                   dilation_rate=dilation_rate)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(.8)(x)
    x = Dense(64)(x)
    x = Dense(1)(x)

    # keep only the last pred_steps output steps for training
    def slice(x, seq_length):
        return x[:, -seq_length:, :]

    pred_seq_train = Lambda(slice, arguments={'seq_length': pred_steps})(x)

    model = Model(history_seq, pred_seq_train)

model.summary()
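
A useful back-of-the-envelope check (my calculation, not from the original post): the receptive field of a stack of causal convolutions is 1 + (kernel_size - 1) * sum(dilation_rates), so with a width-5 filter and dilations doubling from 1 to 2048, each output step can see roughly 16k steps of history.

kernel_size = 5
dilation_rates = [2**i for i in range(12)]  # 1, 2, 4, ..., 2048
receptive_field = 1 + (kernel_size - 1) * sum(dilation_rates)
print(receptive_field)  # 16381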

Complete code

from keras.models import Model, load_model
from keras.layers import Input, Conv1D, Dense, Dropout, Lambda
from keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os


pred_steps = 50
# data preparation
def preprocessing():

    df = pd.read_csv('./data/train_1.csv',usecols=['value'])

    val_pred_start = len(df) - pred_steps
    val_pred_end = len(df)

    train_pred_start = val_pred_start - pred_steps - 1
    train_pred_end = val_pred_start - 1
    enc_length = train_pred_start

    train_enc_start = 0
    train_enc_end = train_enc_start + enc_length - 1

    val_enc_start = train_enc_start + pred_steps
    val_enc_end = val_enc_start + enc_length - 1

    print('Train encoding:', train_enc_start, '-', train_enc_end)
    print('Train prediction:', train_pred_start, '-', train_pred_end, '\n')
    print('Val encoding:', val_enc_start, '-', val_enc_end)
    print('Val prediction:', val_pred_start, '-', val_pred_end)
    series_array = np.array(df)
    series_array = series_array.reshape(len(series_array))
    return train_enc_start, train_enc_end, train_pred_start, train_pred_end, val_enc_start, val_enc_end, val_pred_start, val_pred_end, series_array
# slice the series between the given start and end positions
def get_time_block_series(series_array, start_date, end_date):
    return series_array[start_date:end_date]

# standardize the data; returns the standardized data, its mean, and its standard deviation
def transform_series_encode(data):
    data_mean = data.mean(axis=0).reshape(-1, 1)
    data_std = data.std(axis=0).reshape(-1,1)
    epsilon = 1e-6
    data = (data - data_mean)/(data_std+epsilon)
    data = data.reshape((data.shape[0], data.shape[1], 1))
    return data, data_mean, data_std

# invert the standardization
def untransform_series_decode(data, data_mean, data_std):
    data_std = data_std[0][0]
    data_mean = data_mean[0][0]
    data = data * data_std + data_mean
    data = data.reshape(len(data))
    return data

# standardize data using precomputed statistics (used for the decoder targets)
def transform_series_decode(data, data_mean, data_std):
    epsilon = 1e-6 # prevent numerical error in the case std = 0
    data = (data - data_mean)/(data_std+epsilon)
    data = data.reshape((data.shape[0], data.shape[1], 1))
    return data


# autoregressive prediction: predict one step at a time and feed each
# prediction back into the history before predicting the next step
def predict_sequences(input_sequences, batch_size):
    history_sequences = input_sequences.copy()
    pred_sequences = np.zeros((history_sequences.shape[0], pred_steps, 1))  # (batch, pred_steps, 1)
    for i in range(pred_steps):
        # the next-step prediction is the last time step of the model output
        last_step_pred = model.predict(history_sequences, batch_size=batch_size)[:, -1, 0]
        pred_sequences[:, i, 0] = last_step_pred

        # append the new prediction to the history sequence
        history_sequences = np.concatenate([history_sequences,
                                            last_step_pred.reshape(-1, 1, 1)], axis=1)

    return pred_sequences

# run the autoregressive prediction and plot it against the target
def predict_and_plot(encoder_input_data, pre_target_data, encode_series_mean,
                     encode_series_std, batch_size, enc_tail_len=50):
    encode_series = encoder_input_data
    pred_series = predict_sequences(encode_series, batch_size)

    # flatten everything to 1-D for plotting
    encode_series = encode_series.reshape(-1)
    pred_series = pred_series.reshape(-1)
    pre_target_data = pre_target_data.reshape(-1)

    # map the normalized encoder and prediction values back to the original
    # scale (pre_target_data is already on the original scale)
    encode_series = untransform_series_decode(encode_series, encode_series_mean, encode_series_std)
    pred_series = untransform_series_decode(pred_series, encode_series_mean, encode_series_std)

    plt.figure(figsize=(10, 6))
    plt.plot(range(enc_tail_len), encode_series[-enc_tail_len:], label='encoding series')
    plt.plot(range(enc_tail_len, enc_tail_len + len(pred_series)), pred_series,
             color='red', label='predict')
    plt.plot(range(enc_tail_len, enc_tail_len + len(pre_target_data)), pre_target_data,
             color='orange', label='target')

    plt.title('Encoder Series Tail of Length %d, Target Series, and Predictions' % enc_tail_len)
    plt.legend()
    plt.show()

model_name = 'Wavenet'

# load a previously trained model if one exists
if os.path.exists('{}.h5'.format(model_name)):
    print('Load Previous Models')
    model = load_model(model_name + '.h5')


# run preprocessing to obtain the data splits
train_enc_start, train_enc_end, train_pred_start, train_pred_end, val_enc_start, val_enc_end, val_pred_start, val_pred_end, series_array = preprocessing()

#### Build neural networks ####
if not os.path.exists('{}.h5'.format(model_name)):
    n_filters = 128
    filter_width = 5
    dilation_rates = [2**i for i in range(12)]  # 1, 2, 4, ..., 2048
    history_seq = Input(shape=(None, 1))
    x = history_seq

    # stack of dilated causal convolutions
    for dilation_rate in dilation_rates:
        x = Conv1D(filters=n_filters,
                   kernel_size=filter_width,
                   padding='causal',
                   dilation_rate=dilation_rate)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(.8)(x)
    x = Dense(64)(x)
    x = Dense(1)(x)

    # keep only the last pred_steps output steps for training
    def slice(x, seq_length):
        return x[:, -seq_length:, :]

    pred_seq_train = Lambda(slice, arguments={'seq_length': pred_steps})(x)

    model = Model(history_seq, pred_seq_train)

model.summary()

#### Train neural networks ####
batch_size = 2**8
epochs = 100
encoder_input_data = get_time_block_series(series_array,train_enc_start, train_enc_end)
encoder_input_data, encode_series_mean, encode_series_std = transform_series_encode(encoder_input_data)
decoder_target_data = get_time_block_series(series_array,train_pred_start, train_pred_end)
decoder_target_data = transform_series_decode(decoder_target_data, encode_series_mean, encode_series_std)
# we append a lagged history of the target series to the input data,
# so that we can train with teacher forcing
lagged_target_history = decoder_target_data[:, :-1, :1]
encoder_input_data = np.concatenate([encoder_input_data, lagged_target_history], axis=1)
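
# note (added for clarity): after this concatenation the last pred_steps - 1
# input positions carry true lagged target values, so the model's sliced
# outputs learn one-step-ahead prediction from ground truth (teacher forcing)
# rather than from their own, possibly wrong, predictions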
model.compile(Adam(), loss='mean_absolute_error')
history = model.fit(encoder_input_data, decoder_target_data,
                    batch_size=batch_size,
                    epochs=epochs)
# save the model
model.save(model_name + '.h5')

plt.figure()
plt.plot(history.history['loss'])

plt.xlabel('Epoch')
plt.ylabel('Mean Absolute Error Loss')
plt.title('Loss Over Time')
plt.show()

pre_input_data = get_time_block_series(series_array, val_enc_start, val_enc_end)
pre_input_data, encode_series_mean, encode_series_std = transform_series_encode(pre_input_data)
pre_target_data = get_time_block_series(series_array,val_pred_start, val_pred_end)

predict_and_plot(pre_input_data, pre_target_data, encode_series_mean,
                 encode_series_std, 2**8, enc_tail_len=100)

References

【1】 Time series forecasting with convolution-based neural networks: WaveNet
【2】 Forecasting with Neural Networks - An Introduction to Sequence-to-Sequence Modeling of Time Series
【3】 How does Google's WaveNet generate sound with deep learning?

Reposted from blog.csdn.net/qq_22290797/article/details/108506401