Deep Learning Code for Kaggle's Dogs vs. Cats (Windows 10, GPU-accelerated TensorFlow)

Preface: the development environment for this article is Windows 10 + CUDA 9.0 + cuDNN + PyCharm + Python 3.5 + TensorFlow 1.9.
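
Before diving in, it is worth making sure TensorFlow actually sees the GPU. A minimal check of my own (not from the original setup notes):

import tensorflow as tf

# Prints the GPU device name (e.g. '/device:GPU:0') if CUDA/cuDNN are set up
# correctly; an empty string means TensorFlow is running on CPU only.
print(tf.test.gpu_device_name())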

I. Data: download from Kaggle

https://www.kaggle.com/c/dogs-vs-cats

After downloading, extract the archive into two folders, train and test, and copy them straight into the data folder of the Eclipse project directory.
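
To confirm the extraction worked, a quick count of the training images helps (a throwaway sketch; train_dir here is an example path, point it at your own folder):

import os

train_dir = 'data/train'  # example path, adjust to your own layout
files = os.listdir(train_dir)
print('cats:', sum(f.startswith('cat') for f in files))  # expect 12500
print('dogs:', sum(f.startswith('dog') for f in files))  # expect 12500
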
II. Project structure:
[Figure: project directory layout]

III. Code:
1. input_data.py

#-*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

#The os module provides operating-system functionality for working with files
# and directories; we need it here to list the files under the data directory.

#Dataset composition: the training data contains 12500 cats and 12500 dogs, 25000 images in total
# Get the file paths and labels
train_dir = 'D:\\eclipse\\jonasworkproject\\gputest\\data\\train'
def get_files(file_dir):
    # file_dir: folder path, e.g. 'D:/Python/neural network/Cats_vs_Dogs/data/train'
    # return: shuffled lists of image paths and labels

    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    # Collect the data paths and record the label values
    for file in os.listdir(file_dir):
        name = file.split(sep='.')
        #name has the form ['dog', '9981', 'jpg']
        #str.split turns the file name into a list
        if name[0] == 'cat':
            cats.append(file_dir +'/'+ file)
            #file_dir is the training-set path; joining it with '/' and the file name gives e.g. D:\\eclipse\\jonasworkproject\\gputest\\data\\train/cat.1.jpg
            label_cats.append(0)
        else:
            dogs.append(file_dir +'/'+ file)
            label_dogs.append(1)
        #cats are labeled 0, dogs 1

    print("There are %d cats\nThere are %d dogs" % (len(cats), len(dogs)))

    # Shuffle the file order

    image_list = np.hstack((cats, dogs))
    label_list = np.hstack((label_cats, label_dogs))
    #np.hstack() concatenates the cat and dog image paths into one list, and their labels into another

    temp = np.array([image_list, label_list])
    #temp is a 2 x 25000 array: row 0 holds image_list, row 1 holds label_list
    #(np.array also upcasts the int labels to strings here, hence the int() conversion below)

    temp = temp.transpose()  # transpose
    #now 25000 rows x 2 columns: column 0 is the image path, column 1 is the label
    np.random.shuffle(temp)
    #shuffle the rows, keeping each image aligned with its label
    image_list = list(temp[:,0])   #column 0 of every row: the image paths
    label_list = list(temp[:,1])   #column 1 of every row: the labels, still strings
    label_list = [int(i) for i in label_list]   #convert the labels back to int

    return image_list,label_list

# Generate batches of the same size
def get_batch(image, label, image_W, image_H, batch_size, capacity):
    # image, label: the image and label lists to batch
    # image_W, image_H: target width and height for the images
    # batch_size: number of images per batch
    # capacity: queue capacity
    # return: a batch of images and a batch of labels

    # Convert the plain Python lists into types TensorFlow can recognize
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)

    # Build the input queue: slice_input_producer() takes the image and label lists together in one list argument
    input_queue = tf.train.slice_input_producer([image, label])

    image_contents = tf.read_file(input_queue[0])
    #read the data and labels from the queue
    label = input_queue[1]
    image = tf.image.decode_jpeg(image_contents, channels=3)
    #Decode according to the image format. The training data here is JPEG, so decode_jpeg() is used;
    # for other formats, use the matching decoder (see the official API docs).
    # Note the decoded data is uint8, while conv2d() in the model expects float32 input.


    image = tf.image.resize_images(image, [image_H, image_W], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    image = tf.cast(image, tf.float32)
    #Resizing scales the whole image; the crop-based alternative below cuts from the center and can lose information

#     image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
#     #center-crops if the image is too large and pads if it is too small
    image = tf.image.per_image_standardization(image)
    #Standardize each image: subtract the per-image mean and divide by the adjusted standard deviation, max(stddev, 1/sqrt(num_pixels))

    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,   # number of threads
                                              capacity=capacity)  # capacity is the maximum number of elements the queue can hold
    # image_batch is a 4-D tensor, [batch, height, width, channels];
    # label_batch is a 1-D tensor, [batch].

    label_batch = tf.reshape(label_batch, [batch_size])
    #reshape label_batch into a vector with batch_size entries
    return image_batch, label_batch


# # The code below is a quick test; comment it out when training
# import matplotlib.pyplot as plt
#
# BATCH_SIZE = 2
# #i.e. take only two images per batch
# CAPACITY = 256
# IMG_W = 208
# IMG_H = 208
#
# train_dir = 'D:\\eclipse\\jonasworkproject\\gputest\\data\\train'
#
# image_list, label_list = get_files(train_dir)
# image_batch, label_batch = get_batch(image_list, label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
#
# with tf.Session() as sess:
#     i = 0   #only test a small portion
#     coord = tf.train.Coordinator()
#     threads = tf.train.start_queue_runners(coord=coord)
#     #Coordinator and start_queue_runners manage the queue threads;
#     #try/finally is a safe way to run them
#     try:
#         while not coord.should_stop() and i < 1:
#             img, label = sess.run([image_batch, label_batch])
#
#             for j in np.arange(BATCH_SIZE):
#                 print("label: %d" % label[j])
#                 plt.imshow(img[j, :, :, :])
#                 plt.show()
#             i += 1
#             #display one batch at a time
#
#     except tf.errors.OutOfRangeError:
#         print("done!")
#     finally:
#         coord.request_stop()
#     coord.join(threads)
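
A side note: slice_input_producer and tf.train.batch belong to the queue-runner API, which later TensorFlow releases deprecated in favor of tf.data. For reference, here is a rough tf.data sketch of the same pipeline (my own alternative, not used by the rest of this code; it assumes the image_list and label_list returned by get_files):

def get_batch_dataset(image_list, label_list, image_W, image_H, batch_size):
    # Decode, resize, and standardize one image, mirroring get_batch() above
    def _parse(filename, label):
        contents = tf.read_file(filename)
        image = tf.image.decode_jpeg(contents, channels=3)
        image = tf.image.resize_images(image, [image_H, image_W],
                                       method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        image = tf.image.per_image_standardization(tf.cast(image, tf.float32))
        return image, label

    dataset = tf.data.Dataset.from_tensor_slices((image_list, label_list))
    dataset = dataset.map(_parse).shuffle(1000).repeat().batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()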

2. model.py

#-*- coding:utf-8 -*-
from __future__ import print_function  
import tensorflow as tf  

###
# The model follows the network structure of TensorFlow's official CIFAR-10 example:
# two convolutional layers (each followed by a pooling layer),
# two fully connected layers, and a final softmax that outputs the classification.
###

## Parameters of each layer
##### The whole pipeline is conv > pool > norm > conv > pool > norm > fully connected > fully connected > softmax

def inference(images, batch_size, n_classes):  
    # conv1, shape = [kernel_size, kernel_size, channels, kernel_numbers]  
    with tf.variable_scope("conv1") as scope:  
        weights = tf.get_variable("weights",  
                                  shape=[3, 3, 3, 16],  
                                  dtype=tf.float32,  
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))  
        biases = tf.get_variable("biases",  
                                 shape=[16],  
                                 dtype=tf.float32,  
                                 initializer=tf.constant_initializer(0.1))  
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding="SAME")  
        pre_activation = tf.nn.bias_add(conv, biases)  
        conv1 = tf.nn.relu(pre_activation, name="conv1")  

    # pool1 && norm1  
    with tf.variable_scope("pooling1_lrn") as scope:  
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],  
                               padding="SAME", name="pooling1")  
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001/9.0,  
                          beta=0.75, name='norm1')  

    # conv2  
    with tf.variable_scope("conv2") as scope:  
        weights = tf.get_variable("weights",  
                                  shape=[3, 3, 16, 16],  
                                  dtype=tf.float32,  
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))  
        biases = tf.get_variable("biases",  
                                 shape=[16],  
                                 dtype=tf.float32,  
                                 initializer=tf.constant_initializer(0.1))  
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding="SAME")  
        pre_activation = tf.nn.bias_add(conv, biases)  
        conv2 = tf.nn.relu(pre_activation, name="conv2")  

    # pool2 && norm2  
    with tf.variable_scope("pooling2_lrn") as scope:  
        pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],  
                               padding="SAME", name="pooling2")  
        norm2 = tf.nn.lrn(pool2, depth_radius=4, bias=1.0, alpha=0.001/9.0,  
                          beta=0.75, name='norm2')  

##### The whole pipeline is conv > pool > norm > conv > pool > norm > fully connected > fully connected

    # full-connect1  
    with tf.variable_scope("fc1") as scope:  
        reshape = tf.reshape(norm2, shape=[batch_size, -1])  
        dim = reshape.get_shape()[1].value  
        weights = tf.get_variable("weights",  
                                  shape=[dim, 128],  
                                  dtype=tf.float32,  
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))  
        biases = tf.get_variable("biases",  
                                 shape=[128],  
                                 dtype=tf.float32,  
                                 initializer=tf.constant_initializer(0.1))  
        fc1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name="fc1")  

    # full_connect2  
    with tf.variable_scope("fc2") as scope:  
        weights = tf.get_variable("weights",  
                                  shape=[128, 128],  
                                  dtype=tf.float32,  
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))  
        biases = tf.get_variable("biases",  
                                 shape=[128],  
                                 dtype=tf.float32,  
                                 initializer=tf.constant_initializer(0.1))  
        fc2 = tf.nn.relu(tf.matmul(fc1, weights) + biases, name="fc2")  

    # softmax  
    with tf.variable_scope("softmax_linear") as scope:  
        weights = tf.get_variable("weights",  
                                  shape=[128, n_classes],  
                                  dtype=tf.float32,  
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))  
        biases = tf.get_variable("biases",  
                                 shape=[n_classes],  
                                 dtype=tf.float32,  
                                 initializer=tf.constant_initializer(0.1))  
        softmax_linear = tf.add(tf.matmul(fc2, weights), biases, name="softmax_linear")  
    return softmax_linear  
    #No activation function here: the loss function applies the softmax itself, so it expects raw logits


def losses(logits, labels):  
    with tf.variable_scope("loss") as scope:  
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,  
                                                                       labels=labels, name="xentropy_per_example")  
        loss = tf.reduce_mean(cross_entropy, name="loss")  
        tf.summary.scalar(scope.name + "/loss", loss)   # for display in TensorBoard
    return loss  


def trainning(loss, learning_rate):  
    with tf.name_scope("optimizer"):  
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)  
        global_step = tf.Variable(0, name="global_step", trainable=False)  
        train_op = optimizer.minimize(loss, global_step=global_step)  
    return train_op  


def evaluation(logits, labels):  
    with tf.variable_scope("accuracy") as scope:  
        correct = tf.nn.in_top_k(logits, labels, 1)  
        correct = tf.cast(correct, tf.float16)  
        accuracy = tf.reduce_mean(correct)  
        tf.summary.scalar(scope.name + "/accuracy", accuracy)
    return accuracy  
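
Before wiring the model into the training loop, a quick shape check catches dimension mistakes early. A small smoke test of my own (not part of the original files):

import tensorflow as tf
import model

with tf.Graph().as_default():
    images = tf.random_normal([32, 208, 208, 3])  # dummy batch of 208x208 RGB images
    logits = model.inference(images, batch_size=32, n_classes=2)
    print(logits.get_shape())  # expect (32, 2): one logit per class per image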

3. training.py

#-*- coding:utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import tensorflow as tf
import input_data
import model

N_CLASSES = 2
IMG_H = 208
IMG_W = 208
BATCH_SIZE = 32
CAPACITY = 2000
MAX_STEP = 15000
learning_rate = 0.0001


def run_training():
    train_dir = "E:\\pycharm_project\\kaggle\\data\\train\\"
    logs_train_dir = "E:\\pycharm_project\\kaggle\\logs\\"

    train, train_label = input_data.get_files(train_dir)
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 100 == 0:
                print("Step %d, train loss = %.2f, train accuracy = %.2f%%" % (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, "model.ckpt")
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print("Done training -- epoch limit reached.")
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()

run_training()
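
Since training.py writes the merged summaries to logs_train_dir, you can watch the loss and accuracy curves while training runs: launch tensorboard --logdir=E:\pycharm_project\kaggle\logs in a terminal and open the URL it prints.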

4. Model evaluation

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import tensorflow as tf
import input_data
import model
# evaluate the model
from PIL import Image
import matplotlib.pyplot as plt


def get_test_file(test_dir):
    name_list=[]
    for file in os.listdir(test_dir):
        name_list.append(test_dir+'/'+file)
    return name_list



def get_one_image(train):
    n = len(train)
    ind = np.random.randint(0, n)
    img_dir = train[ind]

    print("The picture name is "+ str(img_dir))

    image = Image.open(img_dir)
    plt.imshow(image)
    plt.show()
    image = image.resize([208, 208])
    image = np.array(image)
    return image


def evaluate_one_image():
    test_dir = "E:\\pycharm_project\\kaggle\\data\\test\\"
    test = get_test_file(test_dir)
    image_array = get_one_image(test)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        # Build the graph from a placeholder so that the image fed through
        # feed_dict below is actually the one the network sees
        x = tf.placeholder(tf.float32, shape=[208, 208, 3])
        image = tf.reshape(x, [1, 208, 208, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        logs_train_dir = "E:\\pycharm_project\\kaggle\\logs\\"
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split("/")[-1].split("-")[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Loading success, global_step is %s" % global_step)
            else:
                print("No checkpoint file found")

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            if max_index == 0:
                print("This is a cat with probability %.6f" % prediction[0, 0])
            else:
                print("This is a dog with probability %.6f" % prediction[0, 1])


evaluate_one_image()
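
One caveat: get_batch() standardizes every training image with per_image_standardization, while evaluate_one_image() feeds raw pixel values, so evaluation inputs are on a different scale than the training inputs. If you want the two to match, a minimal fix (my suggestion, not in the original code) is to standardize in the evaluation graph too:

        # inside evaluate_one_image(), build the input like this instead:
        image = tf.image.per_image_standardization(x)  # x is the [208, 208, 3] placeholder
        image = tf.reshape(image, [1, 208, 208, 3])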

IV. Results:
[Figure: loss and training accuracy at the end of training]
This figure shows the cross-entropy loss and training accuracy at the end of the run; the accuracy is essentially close to 1.

A few pitfalls I ran into:
1. Not knowing where to run the program: in Eclipse or PyCharm, just put run_training() on the last line of training.py;
in Spyder, type that call in the kernel. Without it, nothing runs.
2. File paths: in Eclipse, no matter how I changed the paths they would not load (error 3), so I had to switch to PyCharm, where the paths in this article worked. If they fail for you, try a different IDE.
3. The original video evaluated the model on the training set, which I thought was too lazy, so I rewrote the code to feed the test set into the evaluation; remember to change the paths to your own. The evaluation part is fairly simple, so I did not add comments to it.
4. The original video is Kevin Rush's walkthrough on YouTube, which is blocked in China, so if you have the means, look up his videos. They are not very detailed and not aimed at beginners, but they may still help.

The End...

Reposted from blog.csdn.net/qq_32644121/article/details/81561710