# 利用Python/keras编写简单BP神经网络以及初步分析的算法
# (A simple BP neural network with preliminary analysis, written with Python/Keras.)

###2018/6/16 keras_bp###
###非原创 对他人文章进行改编 侵删###
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt 
from keras.optimizers import SGD,adam
from keras.models import Sequential
from keras.layers.core import Dense, Activation

##### Parameter settings
epoch = 500		# number of training iterations
inputnum = 4		# number of input nodes
midnum = 10		# number of hidden-layer nodes
outputnum = 1		# number of output nodes
learnrate = 0.1 	# learning rate
# BUG FIX: the original assigned `Terror = 1e-7` (meant as an iteration
# stopping tolerance) and then immediately overwrote it with `Terror = 0.2`
# (the acceptable prediction-error threshold), so the tolerance was never
# used anywhere.  The unused tolerance is kept under its own name to remove
# the silent shadowing without changing behavior.
stop_tol = 1e-7		# intended iteration stopping tolerance (currently unused)
datannum = 2688		# total number of samples
trainX = 0.7		# fraction of the data used for training
Terror = 0.2		# report the share of predictions whose error is below this
# Input data file path
inputfile = 'C:\\Users\\37989\\Desktop\\input.xlsx'
# Output (prediction) file path
outputfile = 'C:\\Users\\37989\\Desktop\\output.xlsx'
# Model weight save path
modelfile = 'C:\\Users\\37989\\Desktop\\modelweight.model'
# Columns holding the input factors
factor = ['F1','F2','F3','F4']
# Column holding the prediction target
label = ['L1']
																					
# Initial processing: split the sample indices into train / validation / test
trainnum = int(datannum * trainX)			# number of training samples
validnum = int((datannum - trainnum) / 2)	# half the remainder for validation
testnum = datannum - (trainnum + validnum)	# the rest for testing
# BUG FIX: `sheetname` was renamed `sheet_name` in pandas >= 0.21, and
# `index='Date'` is not a valid read_excel argument.  Note the `.loc[...]`
# calls below rely on the default integer RangeIndex, so converting the
# bogus kwarg to `index_col='Date'` would break them — it is simply dropped.
data = pd.read_excel(inputfile, sheet_name=0)
listnum = np.random.permutation(datannum)	# shuffled sample order
data_train = data.loc[listnum[0:trainnum]].copy()
data_valid = data.loc[listnum[trainnum:trainnum + validnum]].copy()
data_test = data.loc[listnum[trainnum + validnum:datannum]].copy()
# Min-max normalisation using statistics of the FULL data set
data_std = data.max() - data.min()
data_train = (data_train - data.min()) / data_std
data_valid = (data_valid - data.min()) / data_std
# BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0; `.values` is the
# long-supported equivalent and returns the same ndarray.
x_train = data_train[factor].values
y_train = data_train[label].values
x_valid = data_valid[factor].values
y_valid = data_valid[label].values

# Build the BP (feed-forward) network: inputnum -> midnum -> outputnum
model = Sequential()
# BUG FIX: the Keras 1.x kwarg `init=` was renamed `kernel_initializer=` in
# Keras 2, and `nb_epoch=` in fit() was renamed `epochs=`.
model.add(Dense(midnum, input_dim=inputnum, kernel_initializer='random_uniform'))
model.add(Activation('sigmoid'))		# hidden-layer activation
model.add(Dense(outputnum, input_dim=midnum))	# linear output for regression
# NOTE(review): `decay=Terror` feeds the 0.2 prediction-error threshold in as
# the SGD learning-rate decay — this looks accidental (Terror was redefined
# from 1e-7 to 0.2 in the parameter section); confirm the intended decay.
sgd = SGD(lr=learnrate, decay=Terror, momentum=0.9, nesterov=True)
model.compile(loss='mse', optimizer=sgd)	# objective: mean squared error
model.fit(x_train, y_train, epochs=epoch, batch_size=6,
          validation_data=(x_valid, y_valid))
model.save_weights(modelfile)			# persist the trained weights

# Predict on the test set: normalise the factor columns with the same
# full-data statistics used for training.
# BUG FIX: as_matrix() was removed in pandas 1.0 — `.values` is equivalent.
x = ((data_test[factor] - data.min()[factor]) / data_std[factor]).values

# Write the (de-normalised) predictions and their errors back into the
# test frame, then export everything to Excel.
data_test[u'L1_pred'] = model.predict(x) * data_std['L1'] + data.min()['L1']
data_test[u'L1_error'] = round(data_test['L1'] - data_test['L1_pred'],3)
data_test[u'L1_e-per'] =abs(round((data_test['L1_error'] / data_std['L1']) * 100,2))
data_test.to_excel(outputfile)

# Preliminary evaluation of prediction performance on the test set
Emin = min(data_test['L1_error'])	# most negative (smallest) error
Emax = max(data_test['L1_error'])	# largest error
# NOTE: this is actually the ROOT mean squared error (RMSE); the original
# variable name is kept so downstream use of the report is unchanged.
MSE = (sum(data_test['L1_error'] ** 2) / testnum) ** 0.5
# BUG FIX: the original counted `error < Terror`, which also counts
# arbitrarily large NEGATIVE errors as "good" predictions; the error
# magnitude must be compared against the threshold instead.
percent = sum(abs(data_test['L1_error']) < Terror) / testnum * 100
print('Emin=',Emin,' Emax=',Emax,' mse=',MSE,'误差低于' + str(Terror) + '的占',percent,'%')






# 猜你喜欢 (blog footer: "You may also like")
#
# 转载自blog.csdn.net/qq_28969139/article/details/79767650
# (Reposted from the CSDN article above.)