Converting Between Object Detection Annotation File Formats

  1. .odgt to .json (COCO)
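Each line of a CrowdHuman .odgt file is a standalone JSON record. The script below only relies on a handful of its fields: ID (the image name without extension), gtboxes (the list of instances), and, inside each instance, tag, fbox (full-body box), vbox (visible box), hbox (head box), plus an optional ignore flag stored under either extra or head_attr. All boxes are in [x, y, w, h] pixel format. A record looks roughly like this (the box values are illustrative):

{"ID": "273278,600e5000db6370fb", "gtboxes": [
    {"tag": "person",
     "fbox": [72, 202, 163, 503],
     "vbox": [72, 202, 163, 398],
     "hbox": [171, 208, 62, 83],
     "head_attr": {"ignore": 0},
     "extra": {}}
]}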
import os
import json
from PIL import Image

def load_file(fpath):  # fpath is the path to the .odgt file; parses each line (a JSON string) into a dict
    assert os.path.exists(fpath)  # raise if the file does not exist
    with open(fpath, 'r') as fid:
        lines = fid.readlines()
    records = [json.loads(line.strip('\n')) for line in lines]  # one JSON record per line
    return records


def crowdhuman2coco(odgt_path, json_path):  # input: path to the .odgt file; output: path for the .json file
    records = load_file(odgt_path)  # load the odgt records
    # Set up the COCO-style dictionary.
    json_dict = {"images": [], "annotations": [], "categories": []}  # COCO annotation layout
    START_B_BOX_ID = 1  # starting ID for bounding boxes
    image_id = 1  # every image needs a unique ID; pick a start value and increment it
    bbox_id = START_B_BOX_ID
    image = {}  # holds one image entry
    annotation = {}  # holds one annotation entry
    categories = {}  # maps category name to category ID
    record_list = len(records)  # number of records; loop over all of them
    print(record_list)
    # Process the records one line at a time.
    for i in range(record_list):
        file_name = records[i]['ID'] + '.jpg'  # the ID is a string, e.g. 273278,600e5000db6370fb
        # image_id = int(records[i]['ID'].split(",")[0])  # this does not guarantee unique IDs, so assign them manually
        im = Image.open("D:/DataSet/CrowdHuman/CrowdHuman_train/Images/" + file_name)
        # Open the image to read its width and height, because the odgt file carries no size fields.
        image = {'file_name': file_name, 'height': im.size[1], 'width': im.size[0],
                 'id': image_id}  # im.size[0] is the width, im.size[1] the height
        json_dict['images'].append(image)  # one odgt line becomes one entry in 'images'

        gt_box = records[i]['gtboxes']
        gt_box_len = len(gt_box)  # each record holds several boxes; handle them one by one
        for j in range(gt_box_len):
            category = gt_box[j]['tag']
            if category not in categories:  # register any category not seen before
                new_id = len(categories) + 1  # category IDs simply count upwards
                categories[category] = new_id
            category_id = categories[category]  # look up the category ID
            fbox = gt_box[j]['fbox']  # full-body box
            # Handle the ignore flag: it sits either under 'extra' or under 'head_attr' (mutually exclusive),
            # so check both; default to 0 when neither carries it.
            ignore = 0
            if "ignore" in gt_box[j].get('head_attr', {}):
                ignore = gt_box[j]['head_attr']['ignore']
            if "ignore" in gt_box[j].get('extra', {}):
                ignore = gt_box[j]['extra']['ignore']
            # Build the annotation entry.
            annotation = {'area': fbox[2] * fbox[3], 'iscrowd': ignore, 'image_id': image_id,
                          'bbox': fbox, 'hbox': gt_box[j]['hbox'], 'vbox': gt_box[j]['vbox'],
                          'category_id': category_id, 'id': bbox_id, 'ignore': ignore, 'segmentation': []}
            # 'area' is simply width * height of the full-body box (fbox is stored as [x, y, w, h]).
            # The odgt data has nothing that maps to 'segmentation', so it is left empty.
            # 'hbox', 'vbox' and 'ignore' are extra fields kept here in case they are needed later.
            json_dict['annotations'].append(annotation)

            bbox_id += 1  # next bounding-box ID
        image_id += 1  # increment image_id only after all boxes of this image are done; doing it earlier breaks the IDs
        # End of the annotation conversion for this image.
    # The loop below runs once, after all records are processed: it writes out the collected categories.
    for cate, cid in categories.items():
        # dict.items() yields the dictionary as (key, value) pairs: [(name, id), (name, id), ...]
        cat = {'supercategory': 'none', 'id': cid, 'name': cate}
        json_dict['categories'].append(cat)
    # json_dict is now complete. COCO keys that are never accessed do not have to be created;
    # the format is looked up by key, so add further keys yourself if you need them.
    with open(json_path, 'w') as json_fp:
        json_fp.write(json.dumps(json_dict))  # write the JSON file

if __name__ == '__main__':
    crowdhuman2coco(r'D:\DataSet\CrowdHuman\annotation_train.odgt', r'D:\DataSet\CrowdHuman\annotation_train\annotation.json')
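A quick sanity check after the conversion (a minimal sketch; the path is the placeholder used in the call above) is to reload the generated file and count its entries:

import json

with open(r'D:\DataSet\CrowdHuman\annotation_train\annotation.json', 'r') as f:
    coco = json.load(f)
print(len(coco['images']), 'images')
print(len(coco['annotations']), 'annotations')
print([c['name'] for c in coco['categories']])  # for CrowdHuman these are typically 'person' and 'mask'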
  2. .json to .txt
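The script below reads labelme-style JSON files. It only touches two fields, imagePath and the points of the first entry in shapes, and it assumes the shape is a rectangle stored as four corner points, so that points[0] is the top-left and points[2] the bottom-right corner. A minimal input file therefore looks roughly like this (values are illustrative):

{
  "imagePath": "0001.jpg",
  "shapes": [
    {"label": "person",
     "points": [[100.0, 200.0], [300.0, 200.0], [300.0, 400.0], [100.0, 400.0]],
     "shape_type": "polygon"}
  ]
}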
import json
import os
import cv2


# Convert corner coordinates (x1, y1, x2, y2) into the normalized (x, y, w, h) format expected by YOLOv5
def xyxy2xywh(size, box):
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[2]) / 2 * dw
    y = (box[1] + box[3]) / 2 * dh
    w = (box[2] - box[0]) * dw
    h = (box[3] - box[1]) * dh
    return (x, y, w, h)  # all returned values are normalized to [0, 1]
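# Worked example (illustrative numbers): for a 640x480 image and the box (100, 200, 300, 400),
# xyxy2xywh((640, 480), (100, 200, 300, 400)) returns
#   x = (100 + 300) / 2 / 640 = 0.3125
#   y = (200 + 400) / 2 / 480 = 0.625
#   w = (300 - 100) / 640     = 0.3125
#   h = (400 - 200) / 480     = 0.41666...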


def json2yolo(path):
    # Print the file count first to confirm the path is correct.
    print(len(os.listdir(path)))
    pic_path = r'D:\DataSet\46_CUHK Occlusion Dataset\images'  # directory of the original images
    txt_out_path = r'D:\DataSet\46_CUHK Occlusion Dataset\labels\txt'  # output directory for the converted txt files
    # Iterate over every json file.
    for file in os.listdir(path):
        print(file)
        if "json" in str(file):
            with open(os.path.join(path, file), 'r') as f:
                data = json.load(f)
                print(data)
                points = data['shapes'][0]['points']  # only the first shape of each file is converted
                pic_name = os.path.join(pic_path, data['imagePath'])
                txt_name = data['imagePath'].split(".")[0] + ".txt"
                imread = cv2.imread(pic_name)  # read the image to get its height and width
                h = imread.shape[0]
                w = imread.shape[1]
                print(imread.shape)
                # The shape is assumed to be a rectangle stored as four corner points.
                xmin = points[0][0]
                ymin = points[0][1]
                xmax = points[2][0]
                ymax = points[2][1]
                box = [float(xmin), float(ymin), float(xmax), float(ymax)]
                print(box)
                # Convert x1, y1, x2, y2 into the normalized x, y, w, h format used by YOLOv5.
                bbox = xyxy2xywh((w, h), box)
                print(bbox)
                # Write to the target file in the format: class_id x y w h
                with open(os.path.join(txt_out_path, txt_name), 'w') as out_file:
                    out_file.write(str(0) + " " + " ".join(str(x) for x in bbox) + '\n')
                # exit()  # uncomment to stop after the first file when debugging


if __name__ == '__main__':
    # path to the json-format annotations
    path = r'D:\DataSet\46_CUHK Occlusion Dataset\labels\json'
    json2yolo(path)
  3. .xml to .txt
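This step reads Pascal VOC-style XML labels. The fields it uses are size/width, size/height and, for each object, name, difficult and the bndbox corners. A minimal file looks roughly like this (values are illustrative):

<annotation>
    <size>
        <width>640</width>
        <height>480</height>
    </size>
    <object>
        <name>person</name>
        <difficult>0</difficult>
        <bndbox>
            <xmin>100</xmin>
            <ymin>200</ymin>
            <xmax>300</xmax>
            <ymax>400</ymax>
        </bndbox>
    </object>
</annotation>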
import xml.etree.ElementTree as ET
import os


def convert(size, box):  # size is (width, height); box is (xmin, xmax, ymin, ymax)
    x_center = (box[0] + box[1]) / 2.0
    y_center = (box[2] + box[3]) / 2.0
    x = x_center / size[0]
    y = y_center / size[1]

    w = (box[1] - box[0]) / size[0]
    h = (box[3] - box[2]) / size[1]

    return (x, y, w, h)
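# Worked example (illustrative numbers): note that, unlike xyxy2xywh in step 2, convert() takes the
# box as (xmin, xmax, ymin, ymax). For a 640x480 image and b = (100, 300, 200, 400):
#   x = (100 + 300) / 2 / 640 = 0.3125
#   y = (200 + 400) / 2 / 480 = 0.625
#   w = (300 - 100) / 640     = 0.3125
#   h = (400 - 200) / 480     = 0.41666...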


def convert_annotation(xml_files_path, save_txt_files_path, classes):
    xml_files = os.listdir(xml_files_path)
    print(xml_files)
    for xml_name in xml_files:
        print(xml_name)
        xml_file = os.path.join(xml_files_path, xml_name)
        out_txt_path = os.path.join(save_txt_files_path, xml_name.split('.')[0] + '.txt')
        tree = ET.parse(xml_file)
        root = tree.getroot()
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)

        with open(out_txt_path, 'w') as out_txt_f:
            for obj in root.iter('object'):
                difficult = obj.find('difficult').text
                cls = obj.find('name').text
                if cls not in classes or int(difficult) == 1:
                    continue  # skip classes outside the list and objects marked as difficult
                cls_id = classes.index(cls)
                xmlbox = obj.find('bndbox')
                b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
                     float(xmlbox.find('ymax').text))
                # b = (xmin, xmax, ymin, ymax)
                print(w, h, b)
                bb = convert((w, h), b)
                out_txt_f.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')


if __name__ == "__main__":
    classes = ['bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'person']
    # 1. path to the VOC-format xml label files
    xml_files1 = r'D:\DataSet\person_datasets\CUHK01\xml'
    # 2. output path for the YOLO-format txt label files
    save_txt_files1 = r'D:\DataSet\person_datasets\CUHK01\txt'

    convert_annotation(xml_files1, save_txt_files1, classes)
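Each resulting .txt file contains one line per kept object in the YOLO format "class_id x_center y_center width height", with all four coordinates normalized to [0, 1]. As a quick check (a minimal sketch; the directory is the placeholder used above), you can verify that every written value is in range:

import os

txt_dir = r'D:\DataSet\person_datasets\CUHK01\txt'
for name in os.listdir(txt_dir):
    if not name.endswith('.txt'):
        continue
    with open(os.path.join(txt_dir, name)) as f:
        for line in f:
            values = [float(v) for v in line.split()[1:]]  # skip the class id
            assert all(0.0 <= v <= 1.0 for v in values), (name, line)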
