Two-layer convolutional neural network -- TensorFlow

--A reimplementation of the code from Section 1.2.2 of Chapter 1 of 《21个项目玩转深度学习》.

--Gaps in my own knowledge that surfaced along the way (each point is illustrated in the sketch right after this list):

1. To reshape a tensor you cannot call tensor.reshape(shape=[...]); use tf.reshape(tensor, shape=[...]) instead.

2. Dropout is applied to the fully connected layer with tf.nn.dropout(feature_tensor, keep_prob=...).

3. A question turned takeaway: does softmax_cross_entropy_with_logits already guard against log(0) internally? It does: the op consumes the raw logits with a numerically stable formulation, so manually clipping the softmax output (as this listing originally did) is unnecessary.
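
A minimal, self-contained sketch of all three points (assuming TensorFlow 1.x, the same API family as the listing below; the tensors here are made-up stand-ins for illustration):

import tensorflow as tf

t = tf.zeros([4, 784])  # stand-in feature tensor

# 1. tf.Tensor has no .reshape() method in TF 1.x; use the tf.reshape op.
t4d = tf.reshape(t, shape=[-1, 28, 28, 1])
# t.reshape(shape=[-1, 28, 28, 1])  # AttributeError

# 2. Dropout on a fully connected activation; kept units are scaled by 1/keep_prob.
keep_prob = tf.placeholder(tf.float32)
dropped = tf.nn.dropout(t, keep_prob=keep_prob)

# 3. The fused op works on raw logits (log-sum-exp), so it never evaluates
#    log(0); the hand-written formula overflows to inf on extreme logits.
logits = tf.constant([[1000.0, -1000.0]])
labels = tf.constant([[0.0, 1.0]])
fused = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
manual = -tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)), axis=1)

with tf.Session() as sess:
    print(sess.run(fused))   # [2000.] -- finite
    print(sess.run(manual))  # [inf]   -- log(0)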

# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST; labels are one-hot encoded 10-vectors.
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)

def Weight(shape):
    # Small random initialization to break symmetry.
    weight = tf.Variable(tf.truncated_normal(shape=shape, stddev=0.1))
    return weight

def Bias(shape):
    # Slightly positive bias to keep ReLU units active at the start.
    initial = tf.constant(0.1, shape=shape)
    bias = tf.Variable(initial_value=initial)
    return bias

def Conv2D(tensor, weight):
    # 2-D convolution, stride 1, zero padding so the spatial size is preserved.
    conv = tf.nn.conv2d(tensor, weight, strides=[1, 1, 1, 1], padding='SAME')
    return conv

def Maxpool(tensor):
    # 2x2 max pooling with stride 2 halves each spatial dimension.
    pool = tf.nn.max_pool(tensor, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    return pool

x = tf.placeholder(shape=[None, 784], dtype=tf.float32)
# Labels must be float32: tf.nn.softmax_cross_entropy_with_logits requires
# labels and logits to share a dtype (the original tf.int32 raises an error).
y_ = tf.placeholder(shape=[None, 10], dtype=tf.float32)
keep_prob = tf.placeholder(tf.float32)

# Reshape flat 784-vectors into NHWC image batches: [batch, 28, 28, 1].
x_ = tf.reshape(x, shape=[-1, 28, 28, 1])

# First conv layer: 5x5 kernels, 1 input channel -> 32 feature maps.
weight1 = Weight(shape=[5, 5, 1, 32])
b1 = Bias(shape=[32])

# Second conv layer: 5x5 kernels, 32 -> 64 feature maps.
weight2 = Weight(shape=[5, 5, 32, 64])
b2 = Bias(shape=[64])

conv1 = tf.nn.relu(Conv2D(x_, weight1) + b1)
pool1 = Maxpool(conv1)   # 28x28 -> 14x14

conv2 = tf.nn.relu(Conv2D(pool1, weight2) + b2)
pool2 = Maxpool(conv2)   # 14x14 -> 7x7

# Fully connected layer: flatten the 7x7x64 feature maps into 1024 units.
w1_fc = Weight(shape=[7*7*64, 1024])
b1_fc = Bias(shape=[1024])
pool2_flat = tf.reshape(pool2, shape=[-1, 7*7*64])
fc1 = tf.nn.relu(tf.matmul(pool2_flat, w1_fc) + b1_fc)
fc1_dropout = tf.nn.dropout(fc1, keep_prob=keep_prob)  # point 2 above


# Output layer: 1024 -> 10 class logits.
w2_fc = Weight(shape=[1024, 10])
b2_fc = Bias(shape=[10])
fc2 = tf.matmul(fc1_dropout, w2_fc) + b2_fc
# Probabilities are only needed for prediction; no tf.clip_by_value is required
# because the loss below consumes the raw logits (see point 3 above).
y = tf.nn.softmax(fc2)

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=fc2)
# Reduce the per-example losses to a scalar before minimizing.
loss = tf.reduce_mean(cross_entropy)

optimizer = tf.train.AdamOptimizer(0.001).minimize(loss)

# Fraction of examples whose predicted class matches the label.
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1)), tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for i in range(10000):
        xs, ys = mnist.train.next_batch(100)
        # keep_prob=0.5 enables dropout during training.
        sess.run(optimizer, feed_dict={x: xs, y_: ys, keep_prob: 0.5})

        if i % 1000 == 0:
            # keep_prob=1 disables dropout when evaluating (on the current training batch).
            train_accuracy = sess.run(accuracy, feed_dict={x: xs, y_: ys, keep_prob: 1.0})
            print('step %d, training accuracy = %.4f' % (i, train_accuracy))
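
The loop above only reports accuracy on the current training batch. A natural final step (a sketch, assuming it sits inside the with-block above after the loop, reusing the names from this listing) is one evaluation on the held-out test set:

    # Final evaluation on the 10,000-image test set; dropout disabled.
    test_accuracy = sess.run(accuracy, feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})
    print('test accuracy = %.4f' % test_accuracy)

Pushing all 10,000 images through the conv layers in a single call can exhaust memory on small GPUs; evaluating in smaller batches and averaging is a common workaround.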

 

Reposted from blog.csdn.net/Strive_For_Future/article/details/81633802