Background:
For object detection work we collect a large number of sample images, annotate them, and build the dataset we need. In the usual layout, one folder holds the collected images and another holds the annotation files. Because of the annotation software used, most tools produce annotation files in XML format. Depending on the model, we sometimes need to split the whole sample set into a training set and a test set, and at the same time convert the XML annotations into JSON files. This article implements both the dataset split and the format conversion.
Dataset preparation:
Your dataset folder should contain two sub-folders: Annotations, which holds the XML annotation file for each image, and JPEGImages, which holds the collected sample images.
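Before splitting, it can help to confirm that every image has a matching annotation. The snippet below is a minimal sketch, assuming the two folder names above and .jpg/.xml extensions; it is not part of the scripts that follow.
import os

# Compare the file stems of the two folders (assumes .jpg images and .xml annotations).
image_stems = {os.path.splitext(f)[0] for f in os.listdir('./JPEGImages') if f.endswith('.jpg')}
xml_stems = {os.path.splitext(f)[0] for f in os.listdir('./Annotations') if f.endswith('.xml')}
print('images without xml:', sorted(image_stems - xml_stems))
print('xml without image:', sorted(xml_stems - image_stems))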
Dataset index split:
# -*- coding:utf-8 -*-
# Purpose: split the xml files into a training set and a validation set
import os, random, shutil

# ratio of the training set to the whole dataset
train_percent = 0.8
# relative path of the xml files to split
xmlfilepath = './Annotations'
# path where the train/val index files are stored
txtsavepath = './Main'

# create the output folder if it does not exist
if not os.path.exists(txtsavepath):
    os.makedirs(txtsavepath)

# list the xml files and count them
total_xml = os.listdir(xmlfilepath)
# print(total_xml)
num = len(total_xml)
indices = range(num)
# print(indices)

# number of training samples
tv = int(num * train_percent)
# randomly sample the training indices
train = random.sample(indices, tv)
# print(train)

# open (or create) the train/val index files
ftrain = open('./Main/train1.txt', 'w')
fval = open('./Main/val1.txt', 'w')

# write the file stems (without the .xml extension)
for i in indices:
    name = total_xml[i][:-4] + '\n'
    if i in train:
        ftrain.write(name)
    else:
        fval.write(name)

# close the files
ftrain.close()
fval.close()
# After the code above runs, the corresponding train/val txt files are generated in the Main folder.
After the script finishes, train1.txt and val1.txt are generated in the Main folder under the current directory.
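As a quick check, you can count the entries written to the two lists and confirm they add up to the number of xml files. This is a minimal sketch, assuming the paths generated above.
import os

# Count the stems written into each index file.
with open('./Main/train1.txt') as f:
    n_train = sum(1 for line in f if line.strip())
with open('./Main/val1.txt') as f:
    n_val = sum(1 for line in f if line.strip())
print('train:', n_train, 'val:', n_val, 'total:', n_train + n_val)
print('xml files:', len(os.listdir('./Annotations')))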
Dataset image split:
Based on the randomly generated index lists in the txt files, copy the corresponding images and xml files.
import os
import shutil

if __name__ == '__main__':
    fileDir = "./JPEGImages/"           # source image folder
    trainDir = './trainDir/train2017/'  # destination folder for training images
    valDir = './valDir/val2017/'        # destination folder for validation images
    if not os.path.exists(trainDir):
        os.makedirs(trainDir)
    if not os.path.exists(valDir):
        os.makedirs(valDir)

    # copy the training images listed in train1.txt
    train = []
    with open('./Main/train1.txt', 'r') as f:
        for line in f:
            train.append(line.strip('\n'))
    for name in train:
        shutil.copy2(fileDir + name + '.jpg', trainDir + name + '.jpg')

    # copy the validation images listed in val1.txt
    val = []
    with open('./Main/val1.txt', 'r') as f:
        for line in f:
            val.append(line.strip('\n'))
    for name in val:
        shutil.copy2(fileDir + name + '.jpg', valDir + name + '.jpg')

    fileDir = "./Annotations/"         # source xml folder
    trainDir_xml = './xml/xml_train/'  # destination folder for training xml files
    valDir_xml = './xml/xml_val/'      # destination folder for validation xml files
    if not os.path.exists(trainDir_xml):
        os.makedirs(trainDir_xml)
    if not os.path.exists(valDir_xml):
        os.makedirs(valDir_xml)

    # copy the training xml files listed in train1.txt
    train = []
    with open('./Main/train1.txt', 'r') as f:
        for line in f:
            train.append(line.strip('\n'))
    # print(train)
    for name in train:
        shutil.copy2(fileDir + name + '.xml', trainDir_xml + name + '.xml')

    # copy the validation xml files listed in val1.txt
    val = []
    with open('./Main/val1.txt', 'r') as f:
        for line in f:
            val.append(line.strip('\n'))
    # print(val)
    for name in val:
        shutil.copy2(fileDir + name + '.xml', valDir_xml + name + '.xml')

    print('New dataset split finished!')
After the code runs, the trainDir, valDir, and xml folders are created under the current directory: trainDir holds the training images, valDir holds the validation images, and the xml folder holds the corresponding annotation files.
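To confirm that the image and annotation counts match for each split, the sketch below compares the folders created by the script above (folder names as assumed from that script).
import os

# Compare image/xml counts per split.
for img_dir, xml_dir in [('./trainDir/train2017/', './xml/xml_train/'),
                         ('./valDir/val2017/', './xml/xml_val/')]:
    n_img = len([f for f in os.listdir(img_dir) if f.endswith('.jpg')])
    n_xml = len([f for f in os.listdir(xml_dir) if f.endswith('.xml')])
    print(img_dir, 'images:', n_img, '| xml:', n_xml, '| match:', n_img == n_xml)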
Data format conversion:
# -*- coding:utf-8 -*-
# Purpose: convert the split train/val xml files into the corresponding COCO-style json files
import sys
import os
import json
import xml.etree.ElementTree as ET
import glob
import shutil
START_BOUNDING_BOX_ID = 1
PRE_DEFINE_CATEGORIES = {'Collision':1, 'Dirty':2, 'Scratch':3}
def get(root, name):
    # return all child elements of `root` matching `name`
    vars = root.findall(name)
    return vars

def get_and_check(root, name, length):
    # return the matching child elements and verify the expected count
    vars = root.findall(name)
    if len(vars) == 0:
        raise ValueError("Can not find %s in %s." % (name, root.tag))
    if length > 0 and len(vars) != length:
        raise ValueError(
            "The size of %s is supposed to be %d, but is %d."
            % (name, length, len(vars))
        )
    if length == 1:
        vars = vars[0]
    return vars

def get_filename_as_integer(filename):
    # strip the path, extension and underscores, then use the last 5 digits as the image id
    filename = filename.replace("\\", "/")
    filename = os.path.splitext(os.path.basename(filename))[0]
    filename1 = filename.split('_')
    filename2 = ''
    for i in range(len(filename1)):
        filename2 += filename1[i]
    return int(filename2[-5:])
def get_categories(xml_files):
    """Generate category name to id mapping from a list of xml files.
    Arguments:
        xml_files {list} -- A list of xml file paths.
    Returns:
        dict -- category name to id mapping.
    """
    classes_names = []
    for xml_file in xml_files:
        tree = ET.parse(xml_file)
        root = tree.getroot()
        for member in root.findall("object"):
            classes_names.append(member[0].text)
    classes_names = list(set(classes_names))
    classes_names.sort()
    return {name: i for i, name in enumerate(classes_names)}
def convert(xml_files, json_file):
    json_dict = {"images": [], "type": "instances", "annotations": [], "categories": []}
    if PRE_DEFINE_CATEGORIES is not None:
        categories = PRE_DEFINE_CATEGORIES
    else:
        categories = get_categories(xml_files)
    bnd_id = START_BOUNDING_BOX_ID
    for xml_file in xml_files:
        tree = ET.parse(xml_file)
        root = tree.getroot()
        path = get(root, "path")
        if len(path) == 1:
            filename = os.path.basename(path[0].text)
        elif len(path) == 0:
            filename = get_and_check(root, "filename", 1).text
        else:
            raise ValueError("%d paths found in %s" % (len(path), xml_file))
        ## The filename must be a number
        # image_id = get_filename_as_int(filename)
        image_id = get_filename_as_integer(filename)
        size = get_and_check(root, "size", 1)
        width = int(get_and_check(size, "width", 1).text)
        height = int(get_and_check(size, "height", 1).text)
        image = {
            "file_name": filename,
            "height": height,
            "width": width,
            "id": image_id,
        }
        json_dict["images"].append(image)
        ## Currently we do not support segmentation.
        # segmented = get_and_check(root, 'segmented', 1).text
        # assert segmented == '0'
        for obj in get(root, "object"):
            category = get_and_check(obj, "name", 1).text
            if category not in categories:
                # assign a new id to any category not in the predefined mapping
                new_id = len(categories)
                categories[category] = new_id
            category_id = categories[category]
            bndbox = get_and_check(obj, "bndbox", 1)
            # VOC boxes are 1-based; shift to 0-based and convert to COCO [x, y, w, h]
            xmin = int(float(get_and_check(bndbox, "xmin", 1).text)) - 1
            ymin = int(float(get_and_check(bndbox, "ymin", 1).text)) - 1
            xmax = int(float(get_and_check(bndbox, "xmax", 1).text))
            ymax = int(float(get_and_check(bndbox, "ymax", 1).text))
            assert xmax > xmin
            assert ymax > ymin
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            ann = {
                "area": o_width * o_height,
                "iscrowd": 0,
                "image_id": image_id,
                "bbox": [xmin, ymin, o_width, o_height],
                "category_id": category_id,
                "id": bnd_id,
                "ignore": 0,
                "segmentation": [],
            }
            json_dict["annotations"].append(ann)
            bnd_id = bnd_id + 1
    for cate, cid in categories.items():
        cat = {"supercategory": "none", "id": cid, "name": cate}
        json_dict["categories"].append(cat)
    os.makedirs(os.path.dirname(json_file), exist_ok=True)
    json_fp = open(json_file, "w")
    json_str = json.dumps(json_dict)
    json_fp.write(json_str)
    json_fp.close()
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Convert Pascal VOC annotation to COCO format."
    )
    parser.add_argument("--xml_dir_train", default='./xml/xml_train/',
                        help="Directory path to xml files.", type=str)
    parser.add_argument("--json_file_train", default='./annotationsjson/instances_train2017.json',
                        help="Output COCO format json file.", type=str)
    parser.add_argument("--xml_dir_val", default='./xml/xml_val/',
                        help="Directory path to xml files.", type=str)
    parser.add_argument("--json_file_val",
                        default='./annotationsjson/instances_val2017.json',
                        help="Output COCO format json file.", type=str)
    args = parser.parse_args()

    xml_files_train = glob.glob(os.path.join(args.xml_dir_train, "*.xml"))
    xml_files_val = glob.glob(os.path.join(args.xml_dir_val, "*.xml"))

    # If you want to do train/test split, you can pass a subset of xml files to convert function.
    print("Number of xml files: {}".format(len(xml_files_train)))
    convert(xml_files_train, args.json_file_train)
    print("Success: {}".format(args.json_file_train))
    print("Number of xml files: {}".format(len(xml_files_val)))
    convert(xml_files_val, args.json_file_val)
    print("Success: {}".format(args.json_file_val))

    # clean up the intermediate index lists and xml copies generated by the split scripts
    for dir_path in ('./Main', './xml/'):
        try:
            shutil.rmtree(dir_path)
        except OSError as e:
            print("Error: %s : %s" % (dir_path, e.strerror))
    print('Data format conversion finished!')
After the code runs, an annotationsjson folder is created that holds the JSON annotation files for the training and validation sets.
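To double-check the result, you can load the generated JSON and print its basic statistics. This is a minimal sketch, assuming the default output paths used above.
import json

# Print the number of images, annotations and categories in each generated file.
for path in ('./annotationsjson/instances_train2017.json',
             './annotationsjson/instances_val2017.json'):
    with open(path) as f:
        coco = json.load(f)
    print(path)
    print('  images:', len(coco['images']),
          'annotations:', len(coco['annotations']),
          'categories:', [c['name'] for c in coco['categories']])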