Project repository: make-your-yolov5_dataset
For more annotation tools, see roundups such as 深度学习图像标注工具汇总 and 十个最常用深度学习图像/视频数据标注工具, or simply search for one.
Here I use labelImg (labelimg.exe).
Once labeling is finished, you have data in VOC format (one .xml file per image).
Directory structure:
dataset
│
├─Annotations
│ train_29635.xml
│ train_29641.xml
│ train_30090.xml
│ ...
│
└─JPEGImages
train_29635.jpg
train_29641.jpg
train_30090.jpg
...
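Before converting, it is worth a quick check that every annotation has a matching image. A minimal sketch (the dataset/voc root is an assumption; point it at wherever your Annotations and JPEGImages folders actually live):

import os

root = 'dataset/voc'  # assumed dataset root, matching the layout used in the scripts below
xml_dir = os.path.join(root, 'Annotations')
img_dir = os.path.join(root, 'JPEGImages')

xml_stems = {f[:-4] for f in os.listdir(xml_dir) if f.lower().endswith('.xml')}
img_stems = {f[:-4] for f in os.listdir(img_dir) if f.lower().endswith('.jpg')}

print('annotations without images:', sorted(xml_stems - img_stems))
print('images without annotations:', sorted(img_stems - xml_stems))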
Before running the script below, update the file paths inside it to match your setup.
import os
import xml.etree.ElementTree as ET

# Class labels; their order defines the YOLO class ids
classes = ['window_shielding', 'multi_signs', 'non_traffic_sign']


def convert(size, box):
    """Convert a VOC box (xmin, xmax, ymin, ymax) to normalized YOLO (x_center, y_center, w, h)."""
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    if w >= 1:
        w = 0.99
    if h >= 1:
        h = 0.99
    return (x, y, w, h)


def convert_annotation(rootpath, xmlname):
    xmlpath = os.path.join(rootpath, 'Annotations')
    xmlfile = os.path.join(xmlpath, xmlname)
    with open(xmlfile, "r", encoding='UTF-8') as in_file:
        txtname = xmlname[:-4] + '.txt'
        print(txtname)
        txtpath = os.path.join(rootpath, 'worktxt')  # generated .txt label files go into worktxt/
        if not os.path.exists(txtpath):
            os.makedirs(txtpath)
        txtfile = os.path.join(txtpath, txtname)
        with open(txtfile, "w+", encoding='UTF-8') as out_file:
            tree = ET.parse(in_file)
            root = tree.getroot()
            size = root.find('size')
            w = int(size.find('width').text)
            h = int(size.find('height').text)
            out_file.truncate()
            for obj in root.iter('object'):
                difficult = obj.find('difficult').text
                cls = obj.find('name').text
                if cls not in classes or int(difficult) == 1:
                    continue
                cls_id = classes.index(cls)
                xmlbox = obj.find('bndbox')
                b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                     float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
                bb = convert((w, h), b)
                out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')


if __name__ == "__main__":
    rootpath = 'dataset/voc'
    xmlpath = os.path.join(rootpath, 'Annotations')
    xml_names = os.listdir(xmlpath)
    for i, name in enumerate(xml_names):
        if name.lower().endswith('.xml'):
            convert_annotation(rootpath, name)
            print('done', i)
        else:
            print('not xml file', i)
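To make the output format concrete, here is a quick sanity check of the conversion math above with made-up numbers (the image size and box coordinates are hypothetical):

# Hypothetical example: a 640x480 image with a box xmin=48, xmax=208, ymin=80, ymax=304
img_w, img_h = 640, 480
xmin, xmax, ymin, ymax = 48.0, 208.0, 80.0, 304.0

x_center = ((xmin + xmax) / 2.0 - 1) / img_w   # the "-1" mirrors the convert() function above
y_center = ((ymin + ymax) / 2.0 - 1) / img_h
box_w = (xmax - xmin) / img_w
box_h = (ymax - ymin) / img_h

print(x_center, y_center, box_w, box_h)
# ~ 0.198 0.398 0.250 0.467, so a label line for class 0 would read roughly:
# 0 0.1984375 0.3979166666666667 0.25 0.4666666666666667

Each line of a worktxt .txt file therefore describes one object: the class id followed by the normalized center x, center y, width and height.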
Directory structure:
dataset
│
├─Annotations
│ train_29635.xml
│ train_29641.xml
│ train_30090.xml
│ ...
│
├─JPEGImages
│ train_29635.jpg
│ train_29641.jpg
│ train_30090.jpg
│ ...
│
└─worktxt
train_29635.txt
train_29641.txt
train_30090.txt
...
import os
import random

root_path = 'dataset/voc'
xmlfilepath = root_path + '/Annotations'
txtsavepath = root_path + '/ImageSets/Main'
if not os.path.exists(txtsavepath):
    os.makedirs(txtsavepath)

train_test_percent = 0.9   # (train + valid) / (train + valid + test)
train_valid_percent = 0.9  # train / (train + valid)

total_xml = os.listdir(xmlfilepath)
num = len(total_xml)
indices = range(num)
tv = int(num * train_test_percent)   # number of train + valid samples
ts = int(num - tv)                   # number of test samples
tr = int(tv * train_valid_percent)   # number of train samples
tz = int(tv - tr)                    # number of valid samples
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)

print("train and valid size:", tv)
print("train size:", tr)
print("test size:", ts)
print("valid size:", tz)

# ftrainall = open(txtsavepath + '/ftrainall.txt', 'w')
ftest = open(txtsavepath + '/test.txt', 'w')
ftrain = open(txtsavepath + '/train.txt', 'w')
fvalid = open(txtsavepath + '/valid.txt', 'w')

ftestimg = open(txtsavepath + '/img_test.txt', 'w')
ftrainimg = open(txtsavepath + '/img_train.txt', 'w')
fvalidimg = open(txtsavepath + '/img_valid.txt', 'w')

for i in indices:
    name = total_xml[i][:-4] + '.txt' + '\n'
    imgname = total_xml[i][:-4] + '.jpg' + '\n'
    if i in trainval:
        # ftrainall.write(name)
        if i in train:
            ftrain.write(name)
            ftrainimg.write(imgname)
        else:
            fvalid.write(name)
            fvalidimg.write(imgname)
    else:
        ftest.write(name)
        ftestimg.write(imgname)

# ftrainall.close()
ftrain.close()
fvalid.close()
ftest.close()

ftrainimg.close()
fvalidimg.close()
ftestimg.close()
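As a quick check of how those ratios divide a dataset, with a hypothetical 1000 annotation files the counts work out as follows:

# Hypothetical example: 1000 XML files with the ratios used above
num = 1000
train_test_percent = 0.9   # (train + valid) / total
train_valid_percent = 0.9  # train / (train + valid)

tv = int(num * train_test_percent)   # train + valid -> 900
ts = num - tv                        # test          -> 100
tr = int(tv * train_valid_percent)   # train         -> 810
tz = tv - tr                         # valid         -> 90

print(tv, ts, tr, tz)  # 900 100 810 90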
Directory structure:
dataset
│
├─Annotations
│ train_29635.xml
│ train_29641.xml
│ train_30090.xml
│ ...
│
├─ImageSets
│ └─Main
│ train.txt
│ test.txt
│ valid.txt
│ img_train.txt
│ img_test.txt
│ img_valid.txt
│
├─JPEGImages
│ train_29635.jpg
│ train_29641.jpg
│ train_30090.jpg
│ ...
│
└─worktxt
train_29635.txt
train_29641.txt
train_30090.txt
...
import os
import shutil

# Collect the train/test/valid file names produced by the split step
img_txt_cg_train = []
img_txt_cg_test = []
img_txt_cg_valid = []
label_txt_cg_train = []
label_txt_cg_test = []
label_txt_cg_valid = []

path = 'dataset/voc/ImageSets/Main/'
for line in open(path + "img_train.txt"):
    img_txt_cg_train.append(line.strip('\n'))
for line in open(path + "img_test.txt"):
    img_txt_cg_test.append(line.strip('\n'))
for line in open(path + "img_valid.txt"):
    img_txt_cg_valid.append(line.strip('\n'))
for line in open(path + "train.txt"):
    label_txt_cg_train.append(line.strip('\n'))
for line in open(path + "test.txt"):
    label_txt_cg_test.append(line.strip('\n'))
for line in open(path + "valid.txt"):
    label_txt_cg_valid.append(line.strip('\n'))

# Create the target directories for the final dataset layout
new_dataset_train = 'dataset/voc/data/train/images/'
new_dataset_test = 'dataset/voc/data/test/images/'
new_dataset_valid = 'dataset/voc/data/valid/images/'

new_dataset_trainl = 'dataset/voc/data/train/labels/'
new_dataset_testl = 'dataset/voc/data/test/labels/'
new_dataset_validl = 'dataset/voc/data/valid/labels/'

if not os.path.exists(new_dataset_train):
    os.makedirs(new_dataset_train)
if not os.path.exists(new_dataset_test):
    os.makedirs(new_dataset_test)
if not os.path.exists(new_dataset_valid):
    os.makedirs(new_dataset_valid)
if not os.path.exists(new_dataset_trainl):
    os.makedirs(new_dataset_trainl)
if not os.path.exists(new_dataset_testl):
    os.makedirs(new_dataset_testl)
if not os.path.exists(new_dataset_validl):
    os.makedirs(new_dataset_validl)

# Copy (or move) images and labels into the final layout
fimg = 'dataset/voc/JPEGImages/'
flable = 'dataset/voc/worktxt/'

# For small datasets copy is recommended; for large datasets move saves disk space
for i in range(len(img_txt_cg_train)):
    shutil.copy(fimg + str(img_txt_cg_train[i]), new_dataset_train)
    shutil.copy(flable + str(label_txt_cg_train[i]), new_dataset_trainl)
for j in range(len(img_txt_cg_test)):
    shutil.copy(fimg + str(img_txt_cg_test[j]), new_dataset_test)
    shutil.copy(flable + str(label_txt_cg_test[j]), new_dataset_testl)
for q in range(len(img_txt_cg_valid)):
    shutil.copy(fimg + str(img_txt_cg_valid[q]), new_dataset_valid)
    shutil.copy(flable + str(label_txt_cg_valid[q]), new_dataset_validl)
The key step is simply this copy (or move) of each image and label into the new layout:

# For small datasets copy is recommended; for large datasets move saves disk space
for i in range(len(img_txt_cg_train)):
    shutil.copy(fimg + str(img_txt_cg_train[i]), new_dataset_train)
    shutil.copy(flable + str(label_txt_cg_train[i]), new_dataset_trainl)
for j in range(len(img_txt_cg_test)):
    shutil.copy(fimg + str(img_txt_cg_test[j]), new_dataset_test)
    shutil.copy(flable + str(label_txt_cg_test[j]), new_dataset_testl)
for q in range(len(img_txt_cg_valid)):
    shutil.copy(fimg + str(img_txt_cg_valid[q]), new_dataset_valid)
    shutil.copy(flable + str(label_txt_cg_valid[q]), new_dataset_validl)
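If the dataset is large, the comment above suggests moving instead of copying, so the files are not duplicated on disk. A sketch of the same loop using shutil.move, shown for the train split only (the test and valid loops change the same way); note that move removes the originals from JPEGImages and worktxt:

# Variant: move instead of copy (reuses fimg, flable and the lists built above)
for i in range(len(img_txt_cg_train)):
    shutil.move(fimg + img_txt_cg_train[i], new_dataset_train)
    shutil.move(flable + label_txt_cg_train[i], new_dataset_trainl)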
Directory structure:
dataset
│
├─Annotations
│ train_29635.xml
│ train_29641.xml
│ train_30090.xml
│ ...
│
├─ImageSets
│ └─Main
│ train.txt
│ test.txt
│ valid.txt
│ img_train.txt
│ img_test.txt
│ img_valid.txt
│
├─data
│  ├─train
│  │  ├─images
│  │  └─labels
│  ├─test
│  │  ├─images
│  │  └─labels
│  └─valid
│     ├─images
│     └─labels
│
├─JPEGImages
│ train_29635.jpg
│ train_29641.jpg
│ train_30090.jpg
│ ...
│
└─worktxt
train_29635.txt
train_29641.txt
train_30090.txt
...
git clone https://github.com/ultralytics/yolov5.git
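After cloning, YOLOv5 needs a small dataset config that points at the train/valid/test image folders created above. A minimal sketch that writes one (the voc.yaml file name and the exact paths are assumptions; adjust them to where your data actually lives):

# Write a minimal YOLOv5 dataset config (file name and paths are assumptions)
yaml_text = """\
train: dataset/voc/data/train/images
val: dataset/voc/data/valid/images
test: dataset/voc/data/test/images

nc: 3
names: ['window_shielding', 'multi_signs', 'non_traffic_sign']
"""
with open('dataset/voc/data/voc.yaml', 'w', encoding='utf-8') as f:
    f.write(yaml_text)

You can then point YOLOv5's train.py at this file through its --data argument; YOLOv5 finds the labels automatically because each images/ folder has a sibling labels/ folder.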