CAR (partial code)

# -*- coding: UTF-8 -*- 
'''
Created on 2017-12-18
@author: 
'''
from scipy.io import loadmat
import numpy as np
import tensorflow as tf 
  
Dynamic_dict=loadmat("newDynamicPower")
Static_dict=loadmat("newStaticPower")
# Inspect the type of the returned object and its keys
# print("type of result:", type(Dynamic_dict))
# print("keys:", Dynamic_dict.keys())
# Inspect the contents of a key
# DynamicFeature
# print("newDynamicPower:", Dynamic_dict['newDynamicPower'])
newDynamicPower = Dynamic_dict['newDynamicPower']  
newStaticPower = Static_dict['newStaticPower']  
# Inspect the ndarray shapes
# print(newDynamicPower.shape)   # (21, 10, 58)
# print(newStaticPower.shape)    # (21, 10, 58)
# Inspect the data type
# print(newDynamicPower.dtype)   # float64
  
# Reorder the ndarray axes, i.e. turn (21, 10, 58) into (58, 21, 10)
# Reference: https://www.cnblogs.com/sunshinewang/p/6893503.html
xx = newDynamicPower.transpose((1, 0, 2))   # swap the first and second axes
yy = newStaticPower.transpose((1, 0, 2))
Dynamic = xx.transpose((2, 1, 0))           # swap the first and third axes
Static = yy.transpose((2, 1, 0))
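# Sanity check: the two chained transposes are equivalent to a single
# transpose((2, 0, 1)), which moves the trial axis to the front in one step.
# assert np.allclose(newDynamicPower.transpose((2, 0, 1)), Dynamic)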
WholeData = np.concatenate((Dynamic, Static), axis=0)   # axis=0 stacks along rows -> 116*21*10
newWholeData = WholeData.reshape(116, 21, 10, 1)
# tf.reshape(WholeData, [-1, 21, 10, 1])    # -1 means "infer this dimension from the data"
print(newWholeData.shape)
# print(Dynamic.shape)   # (58, 21, 10)
# print(Static.shape)    # (58, 21, 10)
  
# Next, build the labels for the two classes
# How to construct ndarrays of all ones / all zeros
temp1 = np.ones(shape=(58, 1))
temp2 = np.zeros(shape=(58, 1))
# print(temp2)
label_Dynamic = np.concatenate((temp1, temp2), axis=1)   # axis=1 stacks along columns -> 58*2
label_Static = np.concatenate((temp2, temp1), axis=1)    # axis=1 stacks along columns -> 58*2
WholeLabel = np.concatenate((label_Dynamic, label_Static), axis=0)   # axis=0 stacks along rows -> 116*2
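# Equivalently, the same 116x2 one-hot matrix in one line: the first 58 rows
# are [1, 0] (dynamic) and the last 58 are [0, 1] (static).
# WholeLabel = np.repeat(np.eye(2), 58, axis=0)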
# print(WholeLabel.shape)
# print(label_Static)
  
# With the input data and labels ready, it is time to actually build the CNN model
x = tf.placeholder(tf.float32, [None, 21, 10, 1])   # input placeholder; each sample is (21, 10, 1), None means any number of samples
y_ = tf.placeholder(tf.float32, [None, 2])          # label placeholder; each sample is 2-dimensional (one-hot)
  
# Initialize the weight and bias parameters
# tf.truncated_normal(shape, mean, stddev): shape is the tensor shape, mean the mean, stddev the standard deviation.
# Truncated normal distribution
def weight_variable(shape):     
    initial = tf.truncated_normal(shape, stddev=0.1)   # mean 0, standard deviation 0.1
    return tf.Variable(initial)  
  
def bias_variable(shape):  
    initial = tf.constant(0.1, shape=shape)   # constant value 0.1
    return tf.Variable(initial) 
  
def conv2done(x, W):     # convolution with stride 1
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')   # VALID padding, so the kernel only slides along one dimension

def conv2dtwo(x, W):     # convolution with stride 2
    return tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding='VALID')   # VALID padding, so the kernel only slides along one dimension
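# With VALID padding the output size along each spatial axis is
# floor((input - kernel) / stride) + 1, so for this network:
#   conv1 (21x1 kernel, stride 1): height (21-21)/1 + 1 = 1, width (10-1)/1 + 1 = 10
#   conv2 (1x2 kernel,  stride 2): height (1-1)/2 + 1 = 1,  width (10-2)/2 + 1 = 5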
      
# First convolutional layer
W_conv1 = weight_variable([21, 1, 1, 6])   # 21*1 kernel, 1 input channel, 6 output channels (i.e. 6 kernels): the first conv layer's weights
b_conv1 = bias_variable([6])               # biases: since weights are shared, there is one bias per kernel

h_conv1 = tf.nn.relu(conv2done(x, W_conv1) + b_conv1)
print(h_conv1.get_shape())   # shape after the first conv layer: (?, 1, 10, 6); each filter has 21+1 parameters
  
# Second convolutional layer
W_conv2 = weight_variable([1, 2, 6, 12])   # the previous layer produced 6 feature maps; now use 12 filters
b_conv2 = bias_variable([12])

h_conv2 = tf.nn.relu(conv2dtwo(h_conv1, W_conv2) + b_conv2)
print(h_conv2.get_shape())   # (?, 1, 5, 12); each filter has 2*6+1 parameters; the feature maps shrink while the channel count grows
  
# Fully connected layer
W_fc1 = weight_variable([1 * 5 * 12, 50])   # 50 neurons
b_fc1 = bias_variable([50])
h_conv2_flat = tf.reshape(h_conv2, [-1, 1 * 5 * 12])   # flatten into a vector
h_fc1 = tf.nn.relu(tf.matmul(h_conv2_flat, W_fc1) + b_fc1)
print(h_fc1.get_shape())   # (?, 50)
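# Trainable parameter count, for reference:
#   conv1: 21*1*1*6 + 6  = 132
#   conv2: 1*2*6*12 + 12 = 156
#   fc1:   60*50 + 50    = 3050
#   fc2:   50*2 + 2      = 102   -> 3440 parameters in total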
  
# Dropout
# To reduce overfitting, apply dropout before the output layer.
keep_prob = tf.placeholder("float")  
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) 
  
# Output layer
# Finally, add a softmax layer, just like the single-layer softmax regression earlier; here there are only 2 classes
W_fc2 = weight_variable([50, 2])  
b_fc2 = bias_variable([2])  
  
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) 
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))   # y_ is the ground truth, y_conv is the CNN's output
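# Note: computing log(softmax) by hand can hit log(0) once the softmax saturates.
# A numerically safer variant (a sketch, not the original code) keeps the raw
# logits and uses TensorFlow's fused op:
# logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# cross_entropy = tf.reduce_mean(
#     tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))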
  
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)  
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) 
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))   # accuracy
  
# Train and evaluate the model
# Use the ADAM optimizer for gradient descent, and pass keep_prob through feed_dict to control the dropout rate.
# Log progress every 50 iterations.
sess = tf.InteractiveSession()
# sess = tf.Session()  
# sess.run(tf.initialize_all_variables())   # initialize all variables (deprecated API)
sess.run(tf.global_variables_initializer())     
# merged = tf.merge_all_summaries() #collect the tf.xxxxx_summary  
# writer = tf.train.SummaryWriter('/home/miaominmin/tensorBoardLog/MNISTtwo',sess.graph)   
for i in range(10000):  
#     summary, loss, _ = sess.run([merged, cross_entropy, train_step], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})   # train_step is an op; "step" means one training step
    # Note these are ops: give the model the inputs it needs, plus the ops to run
    if i % 50 == 0:
        train_accuracy = accuracy.eval(feed_dict={x: newWholeData, y_: WholeLabel, keep_prob: 1.0})   # no dropout (keep_prob = 1.0) at validation and test time
        print("step %d, training accuracy %g" % (i, train_accuracy))
    W_conv1_value,_=sess.run([W_conv1,train_step],feed_dict={x: newWholeData, y_: WholeLabel, keep_prob: 0.5}) 
# print("test accuracy %g" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
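# Note: the accuracy above is measured on the training set itself. A minimal
# sketch of a held-out evaluation, assuming scikit-learn is available (not part
# of the original post):
# from sklearn.model_selection import train_test_split
# trainX, testX, trainY, testY = train_test_split(newWholeData, WholeLabel, test_size=0.2, random_state=0)
# Train on (trainX, trainY), then evaluate with feed_dict={x: testX, y_: testY, keep_prob: 1.0}.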
print(W_conv1_value[:, 0, 0, 1])
print(W_conv1_value[:, 0, 0, 0])
print(W_conv1_value[:, 0, 0, 2])
print(W_conv1_value[:, 0, 0, 3])
print(W_conv1_value[:, 0, 0, 4])
print(W_conv1_value[:, 0, 0, 5])
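# Sample output from one run: the 21 learned weights of a single 21x1 kernel.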

'''[-0.26407599  0.03266463 -0.04081718  0.31727383  0.43744254 -0.03090868
 -0.12897374  0.20652995  0.46201885  0.43942451 -0.05871432  0.38748711
  0.32272327  0.25605536 -0.09641168  0.08256926  0.12576579  0.38763544
 -0.11899859 -0.00729314  0.0253561 ]  '''


Reposted from blog.csdn.net/qq_28088259/article/details/78864567