CIFAR-10: reading image data with a queue and training

input_data.py first builds two matching lists, one of image paths and one of labels, and then uses TensorFlow's queue modules to produce batches.


    
    
    import os
    import numpy as np
    import tensorflow as tf
    import matplotlib.pyplot as plt

    train_path = 'D:/python学习/神经网络动物分类/train/'
    test_path = 'D:/python学习/神经网络动物分类/test/'
    classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

    def get_files(file_dir):
        # file_dir: root folder with one subfolder per class
        # return: shuffled lists of image paths and labels
        img_list = []
        label_list = []
        for index, name in enumerate(classes):
            class_path = file_dir + name + '/'
            for img_name in os.listdir(class_path):
                img_path = class_path + img_name
                img_list.append(img_path)
                label_list.append(int(index))
        temp = np.array([img_list, label_list])
        temp = temp.transpose()  # one (path, label) pair per row
        np.random.shuffle(temp)
        img_list = list(temp[:, 0])
        label_list = list(temp[:, 1])
        label_list = [int(i) for i in label_list]
        return img_list, label_list

    def get_batch(image, label, image_W, image_H, batch_size, capacity):
        # image, label: lists of image paths and labels to batch
        # image_W, image_H: target width and height of each image
        # batch_size: number of images per batch
        # capacity: queue capacity
        # return: one batch of images and the matching labels
        # Convert the Python lists into tensors TensorFlow can consume.
        image = tf.cast(image, tf.string)
        label = tf.cast(label, tf.int32)
        # Build the input queue.
        input_queue = tf.train.slice_input_producer([image, label])
        image_contents = tf.read_file(input_queue[0])
        label = input_queue[1]
        image = tf.image.decode_jpeg(image_contents, channels=3)
        image = tf.image.resize_images(image, [image_H, image_W],
                                       method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        image = tf.cast(image, tf.float32)
        image_batch, label_batch = tf.train.batch([image, label],
                                                  batch_size=batch_size,
                                                  num_threads=64,
                                                  capacity=capacity)
        return image_batch, label_batch

    # Quick check that the two functions work; remove the triple quotes to run it.
    """
    if __name__ == '__main__':
        BATCH_SIZE = 2
        CAPACITY = 256
        IMG_W = 32
        IMG_H = 32
        image_list, label_list = get_files(train_path)
        image_batch, label_batch = get_batch(image_list, label_list,
                                             IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
        with tf.Session() as sess:
            i = 0
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            try:
                while not coord.should_stop() and i < 1:
                    img, label = sess.run([image_batch, label_batch])
                    for j in np.arange(BATCH_SIZE):
                        print("label: %d" % label[j])
                        # Cast back to uint8 so imshow renders the 0-255 range correctly.
                        plt.imshow(img[j, :, :, :].astype(np.uint8))
                        plt.show()
                    i += 1
            except tf.errors.OutOfRangeError:
                print("done!")
            finally:
                coord.request_stop()
            coord.join(threads)
    """
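For reference, the same pipeline can also be expressed with the tf.data API, which superseded the queue runners in later TensorFlow 1.x releases. This is a minimal sketch under that assumption (TensorFlow >= 1.4); make_dataset is a hypothetical helper, not part of the original post:

    import tensorflow as tf

    def make_dataset(img_list, label_list, image_W, image_H, batch_size):
        # Hypothetical tf.data equivalent of get_batch above (assumes TF >= 1.4).
        def _parse(path, label):
            # Decode and resize one image, mirroring the per-element work in get_batch.
            contents = tf.read_file(path)
            image = tf.image.decode_jpeg(contents, channels=3)
            image = tf.image.resize_images(image, [image_H, image_W],
                                           method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            return tf.cast(image, tf.float32), label

        dataset = tf.data.Dataset.from_tensor_slices(
            (tf.constant(img_list), tf.constant(label_list)))
        # Shuffling here plays the role of np.random.shuffle in get_files.
        dataset = dataset.shuffle(buffer_size=len(img_list)).map(_parse)
        return dataset.batch(batch_size).repeat()

A pair of tensors from dataset.make_one_shot_iterator().get_next() would then stand in for image_batch and label_batch, with no coordinator or queue-runner threads to manage.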

model.py implements the model (inference) together with the loss, training op, and evaluation accuracy.


    
    
    # coding=utf-8
    import tensorflow as tf

    def inference(images, batch_size, n_classes):
        with tf.variable_scope('conv1') as scope:
            # 3x3 kernels over 3 input channels, producing 16 feature maps
            weights = tf.get_variable('weights',
                                      shape=[3, 3, 3, 16],
                                      dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
            biases = tf.get_variable('biases',
                                     shape=[16],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))
            conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv1 = tf.nn.relu(pre_activation, name=scope.name)
        with tf.variable_scope('pooling1_lrn') as scope:
            pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pooling1')
            norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
        with tf.variable_scope('conv2') as scope:
            weights = tf.get_variable('weights',
                                      shape=[3, 3, 16, 16],
                                      dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
            biases = tf.get_variable('biases',
                                     shape=[16],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))
            conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
            pre_activation = tf.nn.bias_add(conv, biases)
            conv2 = tf.nn.relu(pre_activation, name='conv2')
        # pool2 and norm2
        with tf.variable_scope('pooling2_lrn') as scope:
            norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
            pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME', name='pooling2')
        with tf.variable_scope('local3') as scope:
            reshape = tf.reshape(pool2, shape=[batch_size, -1])
            dim = reshape.get_shape()[1].value
            weights = tf.get_variable('weights',
                                      shape=[dim, 128],
                                      dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
            biases = tf.get_variable('biases',
                                     shape=[128],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))
            local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        # local4
        with tf.variable_scope('local4') as scope:
            weights = tf.get_variable('weights',
                                      shape=[128, 128],
                                      dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
            biases = tf.get_variable('biases',
                                     shape=[128],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))
            local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')
        # softmax
        with tf.variable_scope('softmax_linear') as scope:
            weights = tf.get_variable('softmax_linear',
                                      shape=[128, n_classes],
                                      dtype=tf.float32,
                                      initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
            biases = tf.get_variable('biases',
                                     shape=[n_classes],
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(0.1))
            softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')
        return softmax_linear

    def losses(logits, labels):
        with tf.variable_scope('loss') as scope:
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=labels, name='xentropy_per_example')
            loss = tf.reduce_mean(cross_entropy, name='loss')
            tf.summary.scalar(scope.name + '/loss', loss)
        return loss

    def trainning(loss, learning_rate):
        with tf.name_scope('optimizer'):
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            global_step = tf.Variable(0, name='global_step', trainable=False)
            train_op = optimizer.minimize(loss, global_step=global_step)
        return train_op

    def evaluation(logits, labels):
        with tf.variable_scope('accuracy') as scope:
            correct = tf.nn.in_top_k(logits, labels, 1)
            correct = tf.cast(correct, tf.float16)
            accuracy = tf.reduce_mean(correct)
            tf.summary.scalar(scope.name + '/accuracy', accuracy)
        return accuracy
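As a sanity check on the fully connected layer's input size, the shapes for a 32x32x3 input work out as follows under SAME padding, where each output side is ceil(input_side / stride); this is a hand calculation added here, not part of the original post:

    # conv1 (3x3x3 -> 16 maps, stride 1):  32 x 32 x 16
    # pool1 (3x3 window, stride 2):        16 x 16 x 16
    # conv2 (3x3x16 -> 16 maps, stride 1): 16 x 16 x 16
    # pool2 (3x3 window, stride 1):        16 x 16 x 16  (LRN layers keep shapes unchanged)
    # flatten: dim = 16 * 16 * 16 = 4096, so the local3 weights end up [4096, 128]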

train.py implements the training loop.


    
    
    import os
    import numpy as np
    import tensorflow as tf
    import input_data
    import model

    N_CLASSES = 10
    IMG_H = 32
    IMG_W = 32
    BATCH_SIZE = 200
    CAPACITY = 2000
    MAX_STEP = 15000
    learning_rate = 0.0001

    def run_training():
        train_dir = "D:\\python学习\\神经网络动物分类\\train\\"
        logs_train_dir = "logs\\"
        train, train_label = input_data.get_files(train_dir)
        train_batch, train_label_batch = input_data.get_batch(train,
                                                              train_label,
                                                              IMG_W,
                                                              IMG_H,
                                                              BATCH_SIZE,
                                                              CAPACITY)
        train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
        train_loss = model.losses(train_logits, train_label_batch)
        train_op = model.trainning(train_loss, learning_rate)
        train_acc = model.evaluation(train_logits, train_label_batch)
        summary_op = tf.summary.merge_all()
        sess = tf.Session()
        train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
                if step % 100 == 0:
                    # evaluation() returns a fraction, so scale it to a percentage.
                    print("Step %d, train loss = %.2f, train accuracy = %.2f%%" % (step, tra_loss, tra_acc * 100.0))
                    summary_str = sess.run(summary_op)
                    train_writer.add_summary(summary_str, step)
                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(logs_train_dir, "model.ckpt")
                    saver.save(sess, checkpoint_path, global_step=step)
        except tf.errors.OutOfRangeError:
            print("Done training -- epoch limit reached.")
        finally:
            coord.request_stop()
        coord.join(threads)
        sess.close()

    if __name__ == '__main__':
        run_training()
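The introduction above mentions prediction, but the post does not show it. Below is a minimal sketch of scoring one image from a saved checkpoint, assuming the same model.inference graph and the checkpoints that run_training writes under logs\; evaluate_one_image is a hypothetical helper added here for illustration:

    import numpy as np
    import tensorflow as tf
    import model

    def evaluate_one_image(image_array, logs_train_dir="logs\\", n_classes=10):
        # image_array: one image as a [32, 32, 3] float array
        with tf.Graph().as_default():
            image = tf.cast(image_array, tf.float32)
            image = tf.reshape(image, [1, 32, 32, 3])
            logits = tf.nn.softmax(model.inference(image, 1, n_classes))
            saver = tf.train.Saver()
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(logs_train_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    # Restore the most recent checkpoint saved during training.
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    prediction = sess.run(logits)
                    return np.argmax(prediction)  # index into the classes list in input_data.py
        return None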



Reposted from blog.csdn.net/m0_37192554/article/details/81331669