I'm building an image search engine for an anime site, which first requires locating the faces of the anime characters. OpenCV's built-in face detection API couldn't box out anything at all. On reflection that makes sense: nobody in anime has normal proportions, the eyes are huge and the nose is barely there. So the only option left was to train my own detector.

First install the TensorFlow Object Detection API; see my earlier post on installing the TensorFlow Object Detection API on Ubuntu 16.04. Once it is installed, it's worth installing TensorBoard too if you don't have it yet, so you can watch the loss and other metrics evolve:

pip install tensorboard
tensorboard --logdir /xxxx/model_ckpt/ # model_ckpt is the checkpoint directory

1. Preparing the Dataset

TensorFlow's object detection uses its own data format, so our data has to be converted. First we need an image annotation tool to mark the anime face in every image of the dataset. I used labelImg, downloadable here:

Link: https://pan.baidu.com/s/1i7oxr1r password: nkmz. After unpacking, go into \windows_v1.4.0\ and run labelImg.exe (a Python environment is required). Open an image, draw a box around each target you want detected, give it a label, and click "Save"; labelImg stores the annotations as one XML file per image. Keep going until every image in the training set is annotated, a job with zero technical content that still eats plenty of time. Two hours later I had a little over 100 images labeled, enough for testing for now.


The generated XML file looks like this:

<annotation>
	<folder>AIR</folder>
	<filename>吃团子可爱的神尾观铃.jpg</filename>
	<path>E:\code\image_search\image\人物\AIR\吃团子可爱的神尾观铃.jpg</path>
	<source>
		<database>Unknown</database>
	</source>
	<size>
		<width>800</width>
		<height>564</height>
		<depth>3</depth>
	</size>
	<segmented>0</segmented>
	<object>
		<name>person</name>
		<pose>Unspecified</pose>
		<truncated>0</truncated>
		<difficult>0</difficult>
		<bndbox>
			<xmin>382</xmin>
			<ymin>163</ymin>
			<xmax>572</xmax>
			<ymax>313</ymax>
		</bndbox>
	</object>
</annotation>
One <object> element appears for every target annotated in the image. Next, convert to the data format TF needs; the format is documented at https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/using_your_own_dataset.md. I converted the XML files to a CSV first and then turned the CSV into a tf.record file.

xml_metadata_csv.py

import os
import glob
import cv2
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET


def png_to_jpg(png_path):
    """Convert a PNG to JPEG, tolerating non-ASCII (e.g. Chinese) paths."""
    jpg_path = png_path.replace('.png', '.jpg')
    # cv2.imread/cv2.imwrite choke on non-ASCII paths on Windows, so read the
    # raw bytes with numpy and go through cv2's in-memory codecs instead.
    im = cv2.imdecode(np.fromfile(png_path, dtype=np.uint8), -1)
    cv2.imencode('.jpg', im)[1].tofile(jpg_path)
    return jpg_path

def xml_to_csv(path):
    xml_list = []
    for dir in os.listdir(path):  # one sub-folder of images + XMLs per series
        img_dir = os.path.join(path, dir)
        for xml_file in glob.glob(os.path.join(img_dir, '*.xml')):
            tree = ET.parse(xml_file)
            root = tree.getroot()
            file_name = root.find('path').text            
            # if file_name.find('.png') > 0:
            #     file_name = png_to_jpg(file_name)

            # labelImg writes <object> children in the order:
            # name, pose, truncated, difficult, bndbox(xmin, ymin, xmax, ymax)
            for member in root.findall('object'):
                value = (file_name,
                         int(root.find('size')[0].text),  # width
                         int(root.find('size')[1].text),  # height
                         member[0].text,                  # class label
                         int(member[4][0].text),          # xmin
                         int(member[4][1].text),          # ymin
                         int(member[4][2].text),          # xmax
                         int(member[4][3].text))          # ymax
                xml_list.append(value)
    column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    return xml_df


def main():
    image_path = r'E:\code\MechineLeaning\image_search\CNN'  # root folder holding the per-series sub-folders
    xml_df = xml_to_csv(image_path)
    xml_df.to_csv('CSVMETADATAFILE.csv', index=None)
    print('successfully converted xml metadata to csv')

if __name__ == '__main__':
    main()
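
For the XML example above, the script emits one CSV row per annotated object, e.g.:

filename,width,height,class,xmin,ymin,xmax,ymax
E:\code\image_search\image\人物\AIR\吃团子可爱的神尾观铃.jpg,800,564,person,382,163,572,313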

Note png_to_jpg(): this is the Python 3 workaround for writing an image to a path containing Chinese characters; cv2.imwrite() cannot handle such paths directly, so the image is routed through cv2.imencode() and numpy's tofile() instead (and read back via np.fromfile() plus cv2.imdecode()).

Converting the CSV to tf.record: generate_tfrecord.py

from __future__ import division
from __future__ import print_function
from __future__ import absolute_import

import os
import io
import pandas as pd
import tensorflow as tf

from PIL import Image
from collections import namedtuple
import dataset_util  # copied from object_detection/utils; or: from object_detection.utils import dataset_util


flags = tf.app.flags
flags.DEFINE_string('csv_input', 'CSVMETADATAFILE.csv', 'Path to the CSV input')
flags.DEFINE_string('output_path', 'person_val.record', 'Path to output TFRecord')
FLAGS = flags.FLAGS


# Map each label text to its numeric class id, starting from 1
def class_text_to_int(row_label):
    if row_label == 'person':
        return 1
    else:
        return None

def split(df, group):
    # Group the CSV rows by filename so that each TFRecord example
    # carries every bounding box annotated on that image.
    data = namedtuple('data', ['filename', 'object'])
    gb = df.groupby(group)
    return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]


def create_tf(group):
    with tf.gfile.GFile(group.filename, 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    width, height = image.size

    filename = group.filename.encode('utf8')
    image_format = b'jpg'
    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []
    classes_text = []
    classes = []

    for index, row in group.object.iterrows():
        # Normalize pixel coordinates to the [0, 1] range the API expects
        xmins.append(row['xmin'] / width)
        xmaxs.append(row['xmax'] / width)
        ymins.append(row['ymin'] / height)
        ymaxs.append(row['ymax'] / height)
        classes_text.append(row['class'].encode('utf8'))
        classes.append(class_text_to_int(row['class']))

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature(image_format),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    }))
    return tf_example


def main(_):
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    # path = os.path.join(os.getcwd(), path_images)
    examples = pd.read_csv(FLAGS.csv_input)
    grouped = split(examples, 'filename')

    for group in grouped:
        tf_example = create_tf(group)
        writer.write(tf_example.SerializeToString())

    writer.close()
    output_path = os.path.join(os.getcwd(), FLAGS.output_path)
    print('Successfully created the TFRecords: {}'.format(output_path))


if __name__ == '__main__':
    tf.app.run()
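
With the CSV in place, producing a record file is just a matter of overriding the two flags defined above (run it once per split, e.g. once for train.record and once for person_val.record):

python generate_tfrecord.py --csv_input=CSVMETADATAFILE.csv --output_path=train.record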
2. Using the TF Object Detection API

After installing the API, the object_detection directory contains several scripts; train.py is the one that runs training. TF's example invocation:

Example usage:
    ./train \
        --logtostderr \
        --train_dir=path/to/train_dir \
        --model_config_path=model_config.pbtxt \
        --train_config_path=train_config.pbtxt \
        --input_config_path=train_input_config.pbtxt
So we also need to prepare the three files train.py expects: model_config.pbtxt, train_config.pbtxt, and train_input_config.pbtxt.

There is a second way to invoke it, using a single pipeline_config.pbtxt file, which is really just the three files above concatenated into one; I kept three separate files for clarity.
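
For reference, a rough sketch of what the combined pipeline file would look like: the contents of the three files shown below simply get pasted into the corresponding top-level blocks (the comments stand in for the actual file contents).

model {
  # contents of model_config.pbtxt (the ssd { ... } block below)
}
train_config {
  # contents of train_config.pbtxt
}
train_input_reader {
  # contents of train_input_config.pbtxt
}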

model_config.pbtxt

  ssd {
    num_classes: 1
    box_coder {
      faster_rcnn_box_coder {
        y_scale: 10.0
        x_scale: 10.0
        height_scale: 5.0
        width_scale: 5.0
      }
    }
    matcher {
      argmax_matcher {
        matched_threshold: 0.5
        unmatched_threshold: 0.5
        ignore_thresholds: false
        negatives_lower_than_unmatched: true
        force_match_for_each_row: true
      }
    }
    similarity_calculator {
      iou_similarity {
      }
    }
    anchor_generator {
      ssd_anchor_generator {
        num_layers: 6
        min_scale: 0.2
        max_scale: 0.95
        aspect_ratios: 1.0
        aspect_ratios: 2.0
        aspect_ratios: 0.5
        aspect_ratios: 3.0
        aspect_ratios: 0.3333
      }
    }
    image_resizer {
      fixed_shape_resizer {
        height: 300
        width: 300
      }
    }
    box_predictor {
      convolutional_box_predictor {
        min_depth: 0
        max_depth: 0
        num_layers_before_predictor: 0
        use_dropout: false
        dropout_keep_probability: 0.8
        kernel_size: 1
        box_code_size: 4
        apply_sigmoid_to_scores: false
        conv_hyperparams {
          activation: RELU_6,
          regularizer {
            l2_regularizer {
              weight: 0.00004
            }
          }
          initializer {
            truncated_normal_initializer {
              stddev: 0.03
              mean: 0.0
            }
          }
          batch_norm {
            train: true,
            scale: true,
            center: true,
            decay: 0.9997,
            epsilon: 0.001,
          }
        }
      }
    }
    feature_extractor {
      type: 'ssd_mobilenet_v1'
      min_depth: 16
      depth_multiplier: 1.0
      conv_hyperparams {
        activation: RELU_6,
        regularizer {
          l2_regularizer {
            weight: 0.00004
          }
        }
        initializer {
          truncated_normal_initializer {
            stddev: 0.03
            mean: 0.0
          }
        }
        batch_norm {
          train: true,
          scale: true,
          center: true,
          decay: 0.9997,
          epsilon: 0.001,
        }
      }
    }
    loss {
      classification_loss {
        weighted_sigmoid {
          anchorwise_output: true
        }
      }
      localization_loss {
        weighted_smooth_l1 {
          anchorwise_output: true
        }
      }
      hard_example_miner {
        num_hard_examples: 3000
        iou_threshold: 0.99
        loss_type: CLASSIFICATION
        max_negatives_per_positive: 3
        min_negatives_per_image: 0
      }
      classification_weight: 1.0
      localization_weight: 1.0
    }
    normalize_loss_by_num_matches: true
    post_processing {
      batch_non_max_suppression {
        score_threshold: 1e-8
        iou_threshold: 0.6
        max_detections_per_class: 100
        max_total_detections: 100
      }
      score_converter: SIGMOID
    }
  }
train_config.pbtxt

  batch_size: 24
  optimizer {
    rms_prop_optimizer: {
      learning_rate: {
        exponential_decay_learning_rate {
          initial_learning_rate: 0.004
          decay_steps: 800720
          decay_factor: 0.95
        }
      }
      momentum_optimizer_value: 0.9
      decay: 0.9
      epsilon: 1.0
    }
  }
  fine_tune_checkpoint: "/home/bqh/Code/image_search/model_ckpt/model.ckpt"
  from_detection_checkpoint: false
  # Note: The below line limits the training process to 200K steps, which we
  # empirically found to be sufficient enough to train the pets dataset. This
  # effectively bypasses the learning rate schedule (the learning rate will
  # never decay). Remove the below line to train indefinitely.
  num_steps: 200000
  data_augmentation_options {
    random_horizontal_flip {
    }
  }
  data_augmentation_options {
    ssd_random_crop {
    }
  }
train_input_config.pbtxt

  tf_record_input_reader {
    input_path: "$PATH/train.record"
  }
  label_map_path: "$PATH/label_map.pbtxt"
Notes: model_config.pbtxt has num_classes: 1 because we are only detecting one class, anime faces; if you are detecting multiple kinds of targets, set it to however many classes you have. The model used here is SSD, but other detectors such as Faster R-CNN also work (for the theory see http://blog.csdn.net/luoyang224/article/details/77529536). train_input_config.pbtxt references two more files: the training record generated in step 1 and the label map for the targets.

label_map.pbtxt

item {
  id: 1
  name: 'person'
}
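
If you were detecting several classes, the label map would simply list one item per class, with ids matching what class_text_to_int() returns in generate_tfrecord.py. For illustration only (the second class here is made up):

item {
  id: 1
  name: 'person'
}
item {
  id: 2
  name: 'cat'  # hypothetical second class
}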
3. Training

I don't have a GPU that could handle a dataset of any real size, so I trained directly on the CPU, and it is painfully slow: with the config above, each batch of 24 images needs roughly 16 GB of RAM, and after more than four days of training I had only reached close to 80,000 iterations. Use a GPU if you possibly can. The training command:

python train.py --logtostderr --train_dir=$PATH/model_ckpt --model_config_path=$PATH/config/model_config.pbtxt --train_config_path=$PATH/config/train_config.pbtxt --input_config_path=$PATH/config/train_input_config.pbtxt

4. Evaluation

The API includes export_inference_graph.py for exporting a trained model, but it errored out when I ran it; from what I found online this is a TF version issue. Since testing works fine without exporting, let's evaluate directly using eval.py, which is invoked much like train.py above:

python  eval.py --logtostderr --checkpoint_dir=$PATH/model_ckpt --eval_dir=$PATH/eval_dir --eval_config_path=$PATH/config/eval_config.pbtxt --model_config_path=$PATH/config/model_config.pbtxt --input_config_path=$PATH/config/eval_input_config.pbtxt

This likewise needs two more files, eval_config.pbtxt and eval_input_config.pbtxt:

eval_config.pbtxt

  num_examples: 2000
  # Note: The below line limits the evaluation process to 10 evaluations.
  # Remove the below line to evaluate indefinitely.
  # max_evals: 10
eval_input_config.pbtxt

  tf_record_input_reader {
    input_path: "$PATH/person_val.record"
  }
  label_map_path: "$PATH/eval_label_map.pbtxt"
  shuffle: false
  num_readers: 1
Here person_val.record is the tf.record for the validation set, generated the same way as in step 1, and eval_label_map.pbtxt is identical to label_map.pbtxt from step 1. I ran into one problem: line 433 of the source file utils/object_detection_evaluation.py contains the check

if groundtruth_is_difficult_list is None:
but it never handles the case where groundtruth_is_difficult_list is empty, so the variable is left unassigned and the later call at line 202 of utils/per_image_evaluation.py crashes. I didn't dig into whether this is a problem with my configuration or a genuinely missing check in the source; I just changed line 433 of utils/object_detection_evaluation.py to:

if groundtruth_is_difficult_list is None or len(groundtruth_is_difficult_list) == 0:
and it ran without problems afterwards. The run writes its output into $PATH/eval_dir; then launch TensorBoard:

tensorboard --logdir $PATH/eval_dir/
and view the results in the browser.

You can see the detections are already being drawn. The remaining error is because training is still in progress (train.py has not yet converged to a satisfactory loss) and because the training set is far too small and needs more samples; but the results are there. For my image search, this is already enough to tell whether a picture contains a face!

5. Exporting the Model

As mentioned above, running the export directly:

python export_inference_graph.py --input_type image_tensor --pipeline_config_path $PATH/config/ttadm_ssd_inception.config --trained_checkpoint_prefix $PATH/model_ckpt/model.ckpt-* --output_directory $PATH/exported_model_directory

makes exporter.py fail at line 71:

rewrite_options = rewriter_config_pb2.RewriterConfig(optimize_tensor_layout=True)
Change it to:

rewrite_options = rewriter_config_pb2.RewriterConfig()
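
After this change the export goes through, and $PATH/exported_model_directory will contain frozen_inference_graph.pb, which the inference script in the next section loads.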

6. Running Inference

import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util


class TOD(object):
    def __init__(self):
        self.PATH_TO_CKPT = r'$PATH/exported_model_directory/frozen_inference_graph.pb'
        self.PATH_TO_LABELS = r'$PATH/eval_label_map.pbtxt'
        self.NUM_CLASSES = 1
        self.detection_graph = self._load_model()
        self.category_index = self._load_label_map()

    def _load_model(self):
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
                return detection_graph

    def _load_label_map(self):
        label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
        categories = label_map_util.convert_label_map_to_categories(label_map,
                                                                    max_num_classes=self.NUM_CLASSES,
                                                                    use_display_name=True)
        category_index = label_map_util.create_category_index(categories)
        return category_index

    def detect(self, image):
        with self.detection_graph.as_default():
            with tf.Session(graph=self.detection_graph) as sess:
                # cv2.imread returns BGR but the model was trained on RGB images,
                # so swap the channels before feeding the graph.
                image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_rgb, axis=0)
                image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
                boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
                scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
                classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    self.category_index,
                    use_normalized_coordinates=True,
                    line_thickness=8)

        cv2.namedWindow("detection", cv2.WINDOW_NORMAL)
        cv2.imshow("detection", image)
        cv2.waitKey(0)


def main():
    image = cv2.imread('1.jpg')
    detector = TOD()
    detector.detect(image)


if __name__ == '__main__':
    main()
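
Since the end goal is an image search engine rather than drawing boxes, here is a minimal sketch of how the raw detections could be turned into face crops for indexing. It reuses the boxes/scores arrays returned by sess.run() in detect() above; extract_faces itself is my own hypothetical helper, and the 0.5 score threshold is an arbitrary choice:

def extract_faces(image, boxes, scores, min_score=0.5):
    """Crop detections scoring above min_score out of the original image.

    boxes arrive in normalized [ymin, xmin, ymax, xmax] order,
    exactly as returned by the detection graph above.
    """
    height, width = image.shape[:2]
    crops = []
    for box, score in zip(np.squeeze(boxes), np.squeeze(scores)):
        if score < min_score:
            continue
        ymin, xmin, ymax, xmax = box
        # Scale normalized coordinates back to pixels and slice the array
        crops.append(image[int(ymin * height):int(ymax * height),
                           int(xmin * width):int(xmax * width)])
    return crops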

Reposted from blog.csdn.net/qq_36810544/article/details/79149424