【Format Conversion Scripts】
- 1. yolo2coco.py
- 2. visdrone2coco.py
- 3. Convert YOLOv5 detect output to COCO
- 4. Convert VisDrone-MOT to COCO
1. yolo2coco.py
import json
import cv2
import os

bnd_id_start = 0
times = 0  # counts images that failed to load
json_dict = {
    "images": [],
    "type": "instances",
    "annotations": [],
    "categories": []
}
raw_images_path = 'images'
raw_labels_path = 'labels'
data = os.listdir(raw_images_path)
bnd_id = bnd_id_start
classes = ["your dataset categories"]
# per-class object / image counters (keys cover class ids 0-21)
classes_count_obj = {i: 0 for i in range(22)}
classes_count_imgs = {i: 0 for i in range(22)}

for d in data:
    filename = d
    img = cv2.imread(os.path.join(raw_images_path, filename))
    txtFile = filename.replace('jpeg', 'txt').replace('jpg', 'txt')
    try:
        height, width = img.shape[0], img.shape[1]
        image_id = filename.split(".")[0]
    except Exception:
        times += 1
        print('file is error:', filename)
        continue  # skip unreadable images instead of reusing stale values
    image = {
        'file_name': filename,
        'height': height,
        'width': width,
        'id': image_id
    }
    json_dict['images'].append(image)
    with open(os.path.join(raw_labels_path, txtFile), 'r') as fr:
        labelList = fr.readlines()
        cls_list = set()
        for c in labelList:
            # YOLO label line: class cx cy w h (all normalized to [0, 1])
            label, xmin, ymin, w, h = c.strip().split(" ")
            label = int(label)
            classes_count_obj[label] += 1
            cls_list.add(label)
            xmin = float(xmin)
            ymin = float(ymin)
            w = float(w)
            h = float(h)
            # normalized center-xywh -> absolute corner coordinates
            x1 = width * xmin - 0.5 * width * w
            y1 = height * ymin - 0.5 * height * h
            x2 = width * xmin + 0.5 * width * w
            y2 = height * ymin + 0.5 * height * h
            o_width = abs(x2 - x1)
            o_height = abs(y2 - y1)
            area = o_width * o_height
            annotation = {
                'area': area,
                'iscrowd': 0,
                'image_id': image_id,
                'bbox': [x1, y1, o_width, o_height],  # COCO bbox: [x, y, w, h]
                'category_id': label,
                'id': bnd_id,
                'ignore': 0,
                'segmentation': [[x1, y1, x1 + o_width, y1, x1 + o_width, y1 + o_height, x1, y1 + o_height]]
            }
            json_dict['annotations'].append(annotation)
            bnd_id += 1
        for i in cls_list:
            if i in classes_count_imgs:
                classes_count_imgs[i] += 1

for i in range(len(classes)):
    cate = classes[i]
    cid = i
    category = {
        'supercategory': 'none',
        'id': cid,
        'name': cate
    }
    json_dict['categories'].append(category)

with open("your json path", 'w') as json_fp:
    json_fp.write(json.dumps(json_dict, indent=4))
print("cls obj nums:", classes_count_obj)
print()
print("cls img nums:", classes_count_imgs)
2. visdrone2coco.py
import os
import cv2
from tqdm import tqdm
import json
from os.path import join as opj

# maps each VisDrone image name (without extension) to the uuid-style id used as the COCO image id
mapping = json.load(open("uuid_mapping.json"))


def convert_to_cocodetection(dir, output_dir):
    train_dir = opj(dir, "VisDrone2019-DET-train")
    val_dir = opj(dir, "VisDrone2019-DET-val")
    test_dir = opj(dir, "VisDrone2019-DET-test-dev")
    train_annotations = opj(train_dir, "annotations")
    val_annotations = opj(val_dir, "annotations")
    test_annotations = opj(test_dir, "annotations")
    train_images = opj(train_dir, "images")
    val_images = opj(val_dir, "images")
    test_images = opj(test_dir, "images")
    id_num = 0
    num = 0
    categories = [
        {"supercategory": "ignored regions", "id": 0, "name": "ignored regions"},
        {"supercategory": "pedestrian", "id": 1, "name": "pedestrian"},
        {"supercategory": "people", "id": 2, "name": "people"},
        {"supercategory": "bicycle", "id": 3, "name": "bicycle"},
        {"supercategory": "car", "id": 4, "name": "car"},
        {"supercategory": "van", "id": 5, "name": "van"},
        {"supercategory": "truck", "id": 6, "name": "truck"},
        {"supercategory": "tricycle", "id": 7, "name": "tricycle"},
        {"supercategory": "awning-tricycle", "id": 8, "name": "awning-tricycle"},
        {"supercategory": "bus", "id": 9, "name": "bus"},
        {"supercategory": "motor", "id": 10, "name": "motor"},
        {"supercategory": "others", "id": 11, "name": "others"}
    ]
    for mode in ["train", "val", "test"]:
        images = []
        annotations = []
        print(f"start loading {mode} data...")
        if mode == "train":
            ann_files = os.listdir(train_annotations)
            annotations_path = train_annotations
            images_path = train_images
        elif mode == "test":
            ann_files = os.listdir(test_annotations)
            annotations_path = test_annotations
            images_path = test_images
        else:
            ann_files = os.listdir(val_annotations)
            annotations_path = val_annotations
            images_path = val_images
        for i in tqdm(ann_files):
            name = i.replace(".txt", "")
            image = {}
            height, width = cv2.imread(opj(images_path, name + ".jpg")).shape[:2]
            file_name = opj("images", "visdrone", "raw", mapping[name] + ".jpg")
            image["file_name"] = file_name
            image["coco_url"] = ""
            image["height"] = height
            image["width"] = width
            image["id"] = mapping[name]
            images.append(image)
            with open(opj(annotations_path, i), "r") as f:
                for line in f.readlines():
                    annotation = {}
                    line = line.replace("\n", "")
                    if line.endswith(","):
                        line = line.rstrip(",")
                    # VisDrone-DET line: bbox_left, bbox_top, bbox_width, bbox_height, score, category, truncation, occlusion
                    line_list = [int(v) for v in line.split(",")]
                    category_id = line_list[5]
                    bbox_xywh = [line_list[0], line_list[1], line_list[2], line_list[3]]
                    annotation["image_id"] = mapping[name]
                    annotation["bbox"] = bbox_xywh
                    annotation["category_id"] = category_id
                    annotation["id"] = id_num
                    annotation["iscrowd"] = 0
                    annotation["segmentation"] = [[bbox_xywh[0], bbox_xywh[1],
                                                   bbox_xywh[0] + bbox_xywh[2], bbox_xywh[1],
                                                   bbox_xywh[0] + bbox_xywh[2], bbox_xywh[1] + bbox_xywh[3],
                                                   bbox_xywh[0], bbox_xywh[1] + bbox_xywh[3]]]
                    annotation["area"] = bbox_xywh[2] * bbox_xywh[3]
                    id_num += 1
                    annotations.append(annotation)
            num += 1
        dataset_dict = {}
        dataset_dict["images"] = images
        dataset_dict["annotations"] = annotations
        dataset_dict["categories"] = categories
        dataset_dict["info"] = dict()
        dataset_dict["licenses"] = dict()
        json_str = json.dumps(dataset_dict, indent=4)
        with open(f'{output_dir}/split_{mode}.json', 'w') as json_file:
            json_file.write(json_str)
        print("json file write done...")


def get_test_namelist(dir, out_dir):
    full_path = out_dir + "/" + "test.txt"
    with open(full_path, 'w') as file:
        for name in tqdm(os.listdir(dir)):
            name = name.replace(".txt", "")
            file.write(name + "\n")
    return None


def centerxywh_to_xyxy(boxes):
    """
    args:
        boxes: list of center_x, center_y, width, height
    return:
        boxes: list of x, y, x, y, corresponding to top left and bottom right
    """
    x_top_left = boxes[0] - boxes[2] / 2
    y_top_left = boxes[1] - boxes[3] / 2
    x_bottom_right = boxes[0] + boxes[2] / 2
    y_bottom_right = boxes[1] + boxes[3] / 2
    return [x_top_left, y_top_left, x_bottom_right, y_bottom_right]


def centerxywh_to_topleftxywh(boxes):
    """
    args:
        boxes: list of center_x, center_y, width, height
    return:
        boxes: list of x, y, width, height, with x, y at the top-left corner
    """
    x_top_left = boxes[0] - boxes[2] / 2
    y_top_left = boxes[1] - boxes[3] / 2
    width = boxes[2]
    height = boxes[3]
    return [x_top_left, y_top_left, width, height]


def clamp(coord, width, height):
    # clip an x1, y1, x2, y2 box to the image boundaries
    if coord[0] < 0:
        coord[0] = 0
    if coord[1] < 0:
        coord[1] = 0
    if coord[2] > width:
        coord[2] = width
    if coord[3] > height:
        coord[3] = height
    return coord


if __name__ == '__main__':
    convert_to_cocodetection(r"your src path", r"your output path")
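convert_to_cocodetection relies on a uuid_mapping.json that maps each VisDrone image name (without extension) to the id used in the COCO file. If that file does not exist yet, a minimal sketch for generating it could look like this (the root path and the one-random-id-per-image scheme are assumptions, not part of the original script):
import os
import json
import uuid
from os.path import join as opj

root = "your src path"  # same root passed to convert_to_cocodetection (assumed)
splits = ["VisDrone2019-DET-train", "VisDrone2019-DET-val", "VisDrone2019-DET-test-dev"]

mapping = {}
for split in splits:
    for fname in os.listdir(opj(root, split, "images")):
        name = os.path.splitext(fname)[0]
        mapping.setdefault(name, uuid.uuid4().hex)  # one id per image name

with open("uuid_mapping.json", "w") as f:
    json.dump(mapping, f, indent=4)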
3. Convert YOLOv5 detect output to COCO
import json
import cv2
import os

times = 0  # counts images that failed to load
pred_json = []  # list of COCO-style result dicts: image_id, bbox, category_id, score
raw_images_path = 'images'
raw_labels_path = 'labels'
data = os.listdir(raw_images_path)

for d in data:
    filename = d
    img = cv2.imread(os.path.join(raw_images_path, filename))
    txtFile = filename.replace('jpeg', 'txt').replace('jpg', 'txt')
    try:
        height, width = img.shape[0], img.shape[1]
        # must match the image ids used in the ground-truth COCO json
        image_id = filename.split(".")[0]
    except Exception:
        times += 1
        print('file is error:', filename)
        continue
    if not os.path.exists(os.path.join(raw_labels_path, txtFile)):
        continue
    with open(os.path.join(raw_labels_path, txtFile), 'r') as fr:
        labelList = fr.readlines()
        for c in labelList:
            # YOLOv5 --save-txt --save-conf line: class cx cy w h conf (normalized)
            label, xmin, ymin, w, h, score = c.strip().split(" ")
            label = int(label)
            xmin = float(xmin)
            ymin = float(ymin)
            w = float(w)
            h = float(h)
            score = float(score)
            # normalized center-xywh -> absolute corner coordinates
            x1 = width * xmin - 0.5 * width * w
            y1 = height * ymin - 0.5 * height * h
            x2 = width * xmin + 0.5 * width * w
            y2 = height * ymin + 0.5 * height * h
            # build a fresh dict per detection; COCO result files use bbox = [x, y, w, h]
            json_dict = {
                'image_id': image_id,
                'category_id': label,
                'bbox': [x1, y1, x2 - x1, y2 - y1],
                'score': score
            }
            pred_json.append(json_dict)

print(len(pred_json))
with open("best_predictions.json", 'w') as json_fp:
    json_fp.write(json.dumps(pred_json, indent=4))
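With a matching ground-truth file (for example the one produced by yolo2coco.py above), best_predictions.json can then be scored with pycocotools; the ground-truth path below is a placeholder, and the image_id values in both files must agree.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO("your json path")                    # ground-truth annotations (placeholder path)
coco_dt = coco_gt.loadRes("best_predictions.json")  # detections written by the script above

coco_eval = COCOeval(coco_gt, coco_dt, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints the standard COCO AP/AR table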
4. Convert VisDrone-MOT to COCO
"""
YOLO 格式的数据集转化为 COCO 格式的数据集
--root_dir 输入根路径
--save_path 保存文件的名字
"""
import os
import cv2
import json
from tqdm import tqdm
import argparse
import progressbar
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', default='VisDrone2019/Test',type=str, help="root path of images and labels, include ./images and ./labels and classes.txt")
parser.add_argument('--save_path', type=str,default='annotations/test.json', help="if not split the dataset, give a path to a json file")
arg = parser.parse_args()
def yolo2coco(arg):
root_path = arg.root_dir
print("Loading data from ",root_path)
assert os.path.exists(root_path)
originLabelsDir = os.path.join(root_path, 'labels')
originImagesDir = os.path.join(root_path, 'JPEGImages')
classes = '0'
indexes = os.listdir(originImagesDir)
dataset = {'categories': [], 'annotations': [], 'images': []}
for i, cls in enumerate(classes, 0):
dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})
ann_id_cnt = 0
for k, index in enumerate(tqdm(indexes)):
txtFile = index.replace('images','txt').replace('.jpg','.txt').replace('.png','.txt')
im = cv2.imread(os.path.join(root_path, 'JPEGImages/') + index)
height, width, _ = im.shape
dataset['images'].append({'file_name': index,
'id': k,
'width': width,
'height': height})
with open(os.path.join(originLabelsDir, txtFile), 'r') as fr:
labelList = fr.readlines()
if len(labelList) == 0:
dataset['annotations'].append({
'image_name': index[:-4],
'area': 0,
'bbox': [],
'category_id': 0,
'id': ann_id_cnt,
'image_id': k,
'iscrowd': 0,
'segmentation': []
})
ann_id_cnt += 1
else:
for label in labelList:
label = label.strip().split()
x = float(label[1])
y = float(label[2])
w = float(label[3])
h = float(label[4])
H, W, _ = im.shape
x1 = (x - w / 2)
y1 = (y - h / 2)
x2 = (x + w / 2)
y2 = (y + h / 2)
cls_id = int(label[0])
width = max(0, x2 - x1)
height = max(0, y2 - y1)
dataset['annotations'].append({
'image_name': index[:-4],
'area': width * height,
'bbox': [x1, y1, width, height],
'category_id': cls_id,
'id': ann_id_cnt,
'image_id': k,
'iscrowd': 0,
'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]]
})
ann_id_cnt += 1
folder = os.path.join("VisDrone2019/", 'annotations')
if not os.path.exists(folder):
os.makedirs(folder)
json_name = os.path.join("VisDrone2019/", arg.save_path)
with open(json_name, 'w') as f:
json.dump(dataset, f, indent=4)
print('Save annotation to {}'.format(json_name))
if __name__ == "__main__":
yolo2coco(arg)
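Assuming the script is saved as visdrone_mot2coco.py (the filename is not given above, so this is a placeholder), running it with the argparse defaults writes VisDrone2019/annotations/test.json:
python visdrone_mot2coco.py --root_dir VisDrone2019/Test --save_path annotations/test.json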