pytorch 目标检测数据处理比赛使用

摘要

这一部分讲解的是比赛选手必备的基本操作:对数据要有一定的理解,所以我们统计数据类别、合理划分训练集和验证集、直接分析 json 文件,刚好对应上面讲解 cocoAPI 的操作。

统计xml文件中的类别数量

import os
import xml.etree.ElementTree as ET
from collections import Counter
import numpy as np
# Count how many objects of each class appear in a folder of PASCAL VOC
# annotation files, so that under-represented classes can be targeted
# with data augmentation.
ann_dir = 'work/trainval/VOC2007/Annotations/'

# BUG FIX: the original called os.listdir(q) on an undefined name `q`;
# the annotation directory variable (originally `a`) is what was meant.
file_names = [file_name.split('.')[0] for file_name in os.listdir(ann_dir)]

# Collect the class name of every <object> element in every annotation file.
# NOTE(review): assumes <name> is the first child of <object>, as in
# standard VOC files — confirm for your dataset.
xml_list = []
for stem in file_names:
    root = ET.parse(ann_dir + stem + '.xml').getroot()
    for member in root.findall('object'):
        xml_list.append(member[0].text)

# Counter does the per-class tally in one pass, replacing the original
# O(num_labels * num_classes) nested counting loop. Iteration order is
# insertion order, matching list(Counter(xml_list)) in the original.
counts = Counter(xml_list)
print(list(counts))
print('类别数量:', len(counts))
for class_name, count in counts.items():
    print(class_name, count)

在这里插入图片描述
这一步处理用来分析各个类别的数量,好针对样本较少的类别使用数据增强。

划分数据集

这一步操作也很简单,我看我的数据集呈现连续性,所以我就从每六张连续的照片中随机选取一张。

import os
import random
import shutil
 xml_train = './test/train/'
 a=os.listdir(xml_train)
 i = 0
 while(i<110):#根据自己数据集的数量进行设置,
     random_file = random.choice(a[i:i+6])  #这里如果修改的话下面i的相加也要修改
     print(random_file)
     source_file = "%s/%s" % (xml_train, random_file)
     xml_val = './test/val/'  #这一步是你要将验证集分配的地方
     if random_file not in os.listdir(xml_val):
         shutil.move(source_file, xml_val)
         i=i+6

生成json文件

目前大多数据集都使用 json 文件来存放标注,使用方便快捷,比起以前的 xml、txt 文件要快捷很多。

import os
import cv2
import json
import xml.dom.minidom
import xml.etree.ElementTree as ET

# Convert a folder of PASCAL VOC .xml annotations (plus the matching .jpg
# images, read only to recover true height/width) into a single COCO-style
# json file with `images`, `annotations` and `categories` sections.

data_dir = './data'  # root folder containing image/ and box/ subfolders (adjust to your setup)

image_file_dir = 'work/val/'                          # images of the split being converted
xml_file_dir = 'work/trainval/VOC2007/Annotations/'   # matching VOC annotation files

annotations_info = {'images': [], 'annotations': [], 'categories': []}

# Garbage-classification class name -> contiguous COCO category id.
categories_map = {'一次性快餐盒':0,'书籍纸张':1,   '充电宝':2,'剩饭剩菜':3,    '包':4,'垃圾桶':5,
                      '塑料器皿':6,'塑料玩具':7, '塑料衣架':8,  '大骨头':9,'干电池':10,
                     '快递纸袋':11,'插头电线':12,  '旧衣服':13,'易拉罐':14,  '枕头':15,
                    '果皮果肉':16, '毛绒玩具':17,'污损塑料':18,'污损用纸':19,'洗护用品':20,
                        '烟蒂':21,    '牙签':22, '玻璃器皿':23,  '砧板':24,  '筷子':25,
                    '纸盒纸箱':26,    '花盆':27,   '茶叶渣':28,'菜帮菜叶':29, '蛋壳':30,
                      '调料瓶':31,   '软膏':32,  '过期药物':33,  '酒瓶':34, '金属厨具':35,
                    '金属器皿':36,'金属食品罐':37,     '锅':38, '陶瓷器皿':39,   '鞋':40,
                    '食用油桶':41,  '饮料瓶':42,    '鱼骨':43}

# Emit the COCO "categories" section straight from the mapping
# (fixes the original's `categoriy_info` typo).
for name, cat_id in categories_map.items():
    annotations_info['categories'].append({"id": cat_id, "name": name})

# File stems shared by each image (.jpg) and its annotation (.xml).
file_names = [image_file_name.split('.')[0]
              for image_file_name in os.listdir(image_file_dir)]
ann_id = 1  # COCO annotation ids are 1-based and globally unique
for i, file_name in enumerate(file_names):
    print(i)
    image_file_name = file_name + '.jpg'
    xml_file_name = file_name + '.xml'
    image_file_path = os.path.join(image_file_dir, image_file_name)
    xml_file_path = os.path.join(xml_file_dir, xml_file_name)

    # Read the image only for its true dimensions; the BGR->RGB conversion
    # in the original was unnecessary for .shape and has been dropped.
    image = cv2.imread(image_file_path)
    if image is None:
        # Fail fast with a clear message instead of a cryptic cvtColor error.
        raise FileNotFoundError('cannot read image: %s' % image_file_path)
    height, width, _ = image.shape
    image_info = {'file_name': image_file_name, 'id': i+1,
                  'height': height, 'width': width}
    annotations_info['images'].append(image_info)

    DOMTree = xml.dom.minidom.parse(xml_file_path)
    collection = DOMTree.documentElement

    # Flat extraction of all object names and box corners; relies on every
    # <object> carrying exactly one of each tag, in document order.
    names = [name.firstChild.data for name in collection.getElementsByTagName('name')]
    xmins = [xmin.firstChild.data for xmin in collection.getElementsByTagName('xmin')]
    ymins = [ymin.firstChild.data for ymin in collection.getElementsByTagName('ymin')]
    xmaxs = [xmax.firstChild.data for xmax in collection.getElementsByTagName('xmax')]
    ymaxs = [ymax.firstChild.data for ymax in collection.getElementsByTagName('ymax')]

    for j in range(len(names)):
        if names[j] in categories_map:
            image_id = i + 1
            # VOC coordinates are 1-based; shift to 0-based.
            x1,y1,x2,y2 = int(xmins[j]),int(ymins[j]),int(xmaxs[j]),int(ymaxs[j])
            x1,y1,x2,y2 = x1 - 1,y1 - 1,x2 - 1,y2 - 1

            # Clamp boxes that touch the right/bottom image border.
            if x2 == width:
                x2 -= 1
            if y2 == height:
                y2 -= 1

            # COCO bbox format is [x, y, w, h] with inclusive pixel extents.
            x,y = x1,y1
            w,h = x2 - x1 + 1,y2 - y1 + 1
            category_id = categories_map[names[j]]
            area = w * h
            annotation_info = {"id": ann_id, "image_id":image_id, "bbox":[x, y, w, h], "category_id": category_id, "area": area,"iscrowd": 0}
            annotations_info['annotations'].append(annotation_info)
            ann_id += 1

with open('./data/val.json', 'w') as f:
    json.dump(annotations_info, f, indent=4)

print('---整理后的标注文件---')
print('所有图片的数量:',  len(annotations_info['images']))
print('所有标注的数量:',  len(annotations_info['annotations']))
print('所有类别的数量:',  len(annotations_info['categories']))

分析框的大小

import json
from pycocotools.coco import COCO
from collections import Counter
import numpy as np
# Histogram the image heights from a COCO json into fixed buckets
# (there are thousands of distinct sizes, so exact counting is useless).
coco = COCO('data/val.json')
img_ids = list(coco.imgs.keys())

# (height, width) of every image. `sizes` replaces the original name `sum`,
# which shadowed the builtin; loadImgs is now called once per image
# instead of twice.
sizes = []
for img_id in img_ids:
    info = coco.loadImgs(img_id)[0]
    sizes.append((info['height'], info['width']))

total = len(sizes)
print('全部的数量:', total)

# Column 0: upper bound of each height bucket; column 1: count accumulator.
d = np.array([[300, 0], [500, 0], [700, 0], [1000, 0],
              [1300, 0], [1500, 0], [2000, 0], [5000, 0]])
cate = len(d)

# Pass 1: cumulative count of images whose height <= each bound.
for height, _width in sizes:
    for j in range(cate):
        if height <= d[j][0]:
            d[j][1] = d[j][1] + 1

# Pass 2: difference from the largest bucket downwards to turn the
# cumulative counts into per-bucket counts.
for j in range(cate - 1, 0, -1):
    d[j][1] = d[j][1] - d[j - 1][1]

for j in range(cate):
    print(d[j][0], d[j][1])

在这里插入图片描述
这一步我这样写主要是因为框的尺寸有六千多种,所以直接按固定范围统计了。

在目标检测中提取分类数据

为什么我会有这种操作:因为我看自己的数据集大都是一张照片一种类别,很适合分类训练前置网络,直接读取 json 文件就可以了。

from pycocotools.coco import COCO
import pandas as pd
# 'G:\detection\cv\data2/annotations/val.json'
# ./test/annotations.json
# Extract a single-label classification dataset from a COCO detection file:
# every image that contains exactly one annotated object becomes one
# (filename, category_id) row of a csv.
coco = COCO('./test/all.json')
img_ids = coco.getImgIds()

name = []   # image file names with exactly one annotation
kind = []   # the matching category ids
n = 0       # how many single-object images were found
for img_id in img_ids:
    ann_ids = coco.getAnnIds(img_id)
    if len(ann_ids) == 1:
        # BUG FIX: the original loaded coco.loadAnns(ids1[i]) — the i-th
        # annotation id overall, indexed by the *image* loop counter — so
        # the annotation generally did not belong to this image. Load the
        # per-image annotation id list instead.
        ann = coco.loadAnns(ann_ids)[0]
        imgdata = coco.loadImgs(img_id)[0]
        name.append(imgdata['file_name'])
        kind.append(ann['category_id'])
        n = n + 1
print(n)
print(len(img_ids))
print(name)
print(kind)

# Pair filenames with labels; zip replaces the original index loop.
csv = list(zip(name, kind))
column_name = ['filename', 'label']
print(csv)
# `table` replaces the original name `xml`, which shadowed the xml module
# name used elsewhere in this write-up.
table = pd.DataFrame(csv, columns=column_name)

print(table)
table.to_csv('./test/all.csv', index=None)

生成的csv文件也是一般分类比赛常用的格式。

总结

这只是一些简单的基础操作,可以给新手一些启发,掌握csv文件使用和json文件分析你可以随心所欲处理数据。

猜你喜欢

转载自blog.csdn.net/cp1314971/article/details/106272277
今日推荐