A simple CNN for CIFAR-10 image recognition

Adapted from the book TensorFlow实战.

'''
The model has two convolutional layers, each using a ReLU activation, max pooling, and LRN normalization.
They are followed by two fully connected layers, both with ReLU activations.
The output layer uses no activation function.
The loss combines softmax cross-entropy with L2 regularization.

Data augmentation, LRN, and L2 regularization are the main tricks that substantially improve the model's generalization.
'''
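# Note: cifar10 and cifar10_input are the CIFAR-10 tutorial helper modules from
# the TensorFlow models repository; this script assumes they are importable
# from the working directory.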
import cifar10,cifar10_input
import tensorflow as tf
import numpy as np
import time
import math
#Suppress TensorFlow's INFO and WARNING log messages ('2' keeps only errors)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
#Model hyperparameters: maximum number of training steps, minibatch size, and the CIFAR-10 data directory
max_step=3000#more training steps yield higher accuracy
batch_size=128
data_dir='/tmp/cifar10_data/cifar-10-batches-bin'
#Create a weight variable with an optional L2 penalty. Arguments: shape, standard deviation of the truncated-normal initializer, and the L2 coefficient wl.
#The penalty term weight_loss is stored in the 'losses' collection and summed into the total loss later.
def variable_with_weight_loss(shape,stddev,wl):
    var=tf.Variable(tf.truncated_normal(shape,stddev=stddev))
    if wl is not None:
        weight_loss=tf.multiply(tf.nn.l2_loss(var),wl,name='weight_loss')
        tf.add_to_collection('losses',weight_loss)
    return var
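# For example, the call variable_with_weight_loss([dim,384],0.04,0.004) below
# adds 0.004*tf.nn.l2_loss(var) to the 'losses' collection; the loss() function
# later sums the whole collection with tf.add_n, so every penalty automatically
# joins the cross-entropy term.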
#Download and extract the dataset if it is not already present
cifar10.maybe_download_and_extract()
#Load augmented (distorted) CIFAR-10 minibatches for training
images_train,labels_train=cifar10_input.distorted_inputs(data_dir=data_dir,batch_size=batch_size)
#Load center-cropped CIFAR-10 minibatches for testing
images_test,labels_test=cifar10_input.inputs(eval_data=True,data_dir=data_dir,batch_size=batch_size)
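# In the models-repo implementation, distorted_inputs() randomly crops each
# image to 24x24, flips it horizontally, and jitters brightness and contrast,
# while inputs() only center-crops to 24x24; both standardize each image.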
#Placeholders for images and labels; the input pipeline crops every image to 24x24x3
image_holder=tf.placeholder(tf.float32,[batch_size,24,24,3])
label_holder=tf.placeholder(tf.int32,[batch_size])
#First convolutional layer: convolution, bias, ReLU, max pooling, LRN normalization (wl=0, so the conv weights carry no L2 penalty)
w1=variable_with_weight_loss([5,5,3,64],5e-2,0)
kernel1=tf.nn.conv2d(image_holder,w1,[1,1,1,1],padding='SAME')
b1=tf.Variable(tf.constant(0.0,shape=[64]))
conv1=tf.nn.relu(tf.nn.bias_add(kernel1,b1))
pool1=tf.nn.max_pool(conv1,ksize=[1,3,3,1],strides=[1,2,2,1],padding='SAME')
normal1=tf.nn.lrn(pool1,4,bias=1,alpha=0.001/9,beta=0.75)
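# tf.nn.lrn normalizes each activation across a window of depth_radius=4
# channels on either side: output = input / (bias + alpha * sum(input**2))**beta.
# This AlexNet-style LRN encourages competition between adjacent feature maps.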
#Second convolutional layer: convolution, bias, ReLU, LRN normalization, max pooling (LRN and pooling are applied in the opposite order to layer one)
w2=variable_with_weight_loss([5,5,64,64],5e-2,0)
kernel2=tf.nn.conv2d(normal1,w2,[1,1,1,1],padding='SAME')
b2=tf.Variable(tf.constant(0.1,shape=[64]))
conv2=tf.nn.relu(tf.nn.bias_add(kernel2,b2))
normal2=tf.nn.lrn(conv2,4,bias=1,alpha=0.001/9,beta=0.75)
pool2=tf.nn.max_pool(normal2,ksize=[1,3,3,1],strides=[1,2,2,1],padding='SAME')
#First fully connected layer: flatten, affine transform, ReLU
reshape=tf.reshape(pool2,[batch_size,-1])
dim=reshape.get_shape()[1].value
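# With SAME padding and stride 2, each 3x3 pool halves the spatial size:
# 24x24 -> 12x12 -> 6x6, so pool2 is [batch_size,6,6,64] and dim = 6*6*64 = 2304.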
w3=variable_with_weight_loss([dim,384],0.04,0.004)
b3=tf.Variable(tf.constant(0.1,shape=[384]))
fc3=tf.nn.relu(tf.matmul(reshape,w3)+b3)
#Second fully connected layer: affine transform, ReLU
w4=variable_with_weight_loss([384,192],0.04,0.004)
b4=tf.Variable(tf.constant(0.1,shape=[192]))
fc4=tf.nn.relu(tf.matmul(fc3,w4)+b4)
#Output layer: affine transform only (the softmax is folded into the loss below)
w5=variable_with_weight_loss([192,10],1/192.0,0.0)#1/192.0, not 1/192: integer division would zero the stddev under Python 2
b5=tf.Variable(tf.constant(0.0,shape=[10]))
logits=tf.add(tf.matmul(fc4,w5),b5)
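# logits are raw, unnormalized class scores; tf.nn.sparse_softmax_cross_entropy_with_logits
# below applies the softmax internally, which is numerically more stable than
# computing softmax and cross-entropy as separate ops.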
#Build the loss: softmax cross-entropy plus the collected L2 penalties
def loss(logits,labels):
    labels=tf.cast(labels,tf.int64)
    cross_entropy=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=labels,name='cross_entropy_per_example')
    cross_entropy_mean=tf.reduce_mean(cross_entropy,name='cross_entropy')
    tf.add_to_collection('losses',cross_entropy_mean)

    return tf.add_n(tf.get_collection('losses'),name='total_loss')
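# tf.add_n sums everything in the 'losses' collection: cross_entropy_mean plus
# 0.004*l2_loss for w3 and w4 (the conv weights were created with wl=0, so
# their penalty terms are zero).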
#Compute the total loss
total_loss=loss(logits,label_holder)
#Adam optimizer minimizing the total loss
train=tf.train.AdamOptimizer(1e-3).minimize(total_loss)
#Per-example correctness: True where the label is the top-1 prediction
top_k=tf.nn.in_top_k(logits,label_holder,1)
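# in_top_k(logits, labels, 1) yields a boolean vector of length batch_size,
# True where the true label equals the top-1 prediction; summing it counts hits.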
#Create an InteractiveSession and initialize all variables
sess=tf.InteractiveSession()
tf.global_variables_initializer().run()
#Start the input queue threads
tf.train.start_queue_runners()
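# The input pipelines above are TF 1.x queue-based; without starting the
# queue-runner threads, sess.run on images_train/labels_train would block forever.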
#Training loop: report the loss and throughput (examples/sec and sec/batch) every 10 steps
for step in range(max_step):
    start_time=time.time()
    image_batch,label_batch=sess.run([images_train,labels_train])
    _,loss_value=sess.run([train,total_loss],feed_dict={image_holder:image_batch,label_holder:label_batch})
    duration=time.time()-start_time

    if step%10==0:
        examples_per_sec=batch_size/duration
        sec_per_batch=float(duration)

        print('step {:04d}: loss={:.2f} ({:.1f} examples/sec; {:.3f} sec/batch)'.format(step,loss_value,examples_per_sec,sec_per_batch))

#Evaluate top-1 accuracy (precision @ 1) on the test set
num_examples=10000
num_iter=int(math.ceil(num_examples/float(batch_size)))#float() guards against Python 2 integer division
true_count=0
total_sample_count=num_iter*batch_size
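# Note: num_iter*batch_size = 79*128 = 10112 slightly exceeds 10000, so the
# test queue wraps around and a few examples are counted twice; the reported
# precision is therefore a close approximation.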
for step in range(num_iter):
    image_batch,label_batch=sess.run([images_test,labels_test])
    prediction=sess.run([top_k],feed_dict={image_holder:image_batch,label_holder:label_batch})
    true_count+=np.sum(prediction)

precision=true_count/float(total_sample_count)
print("precision @ 1 = %.3f"%precision)

Reposted from blog.csdn.net/qq_41644087/article/details/80497434