ValueError: With n_samples=0, test_size=0.2 and train_size=None, the resulting train set will be empty

Today, while writing a labelme-to-voc script (converting labelme annotations into the standard VOC format) and following a reference article, I ran into the following problem:
ValueError: With n_samples=0, test_size=0.2 and train_size=None, the resulting train set will be empty. Adjust any of the aforementioned parameters.
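For reference (this note is my own addition, not from the reference article): recent versions of scikit-learn raise this exact message when train_test_split is handed zero samples, so it is worth confirming that the list being split is actually non-empty. A minimal sketch with made-up sample names:

from sklearn.model_selection import train_test_split

# train_test_split([], test_size=0.2) reproduces the ValueError above.
names = ["000000", "000001", "000002", "000003", "000004"]  # hypothetical annotation names
if not names:
    raise SystemExit("No annotation files found - check the input path")

train_files, val_files = train_test_split(names, test_size=0.2, random_state=0)
print(len(train_files), len(val_files))  # -> 4 1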
I searched online, and most blog posts say the problem appears with newer scikit-learn releases and recommend downgrading to a version below 0.20.0. However, downgrading also means re-installing compatible versions of numpy, scipy and other libraries, so I would not recommend trying that lightly. Instead I asked a senior labmate, who said the real cause was the parameter settings: the resulting number of train or test samples must work out to an integer. The corrected code is shown below:

# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 13:39:17 2020

@author: nihao
"""
import os
import numpy as np
import codecs
import json
import glob
import cv2
import shutil
from sklearn.model_selection import train_test_split

# 1.标签路径
labelme_path = "D:\\PinInspection\\jpgretanglelabel"  # 原始labelme标注数据路径
saved_path = "D:\\PinInspection\\improveto300\\VOC2007"  # 保存路径

# 2.创建要求文件夹
dst_annotation_dir = os.path.join(saved_path, 'Annotations')
if not os.path.exists(dst_annotation_dir):
    os.makedirs(dst_annotation_dir)
dst_image_dir = os.path.join(saved_path, "JPEGImages")
if not os.path.exists(dst_image_dir):
    os.makedirs(dst_image_dir)
dst_main_dir = os.path.join(saved_path, "ImageSets", "Main")
if not os.path.exists(dst_main_dir):
    os.makedirs(dst_main_dir)

# 3.获取待处理文件
org_json_files = sorted(glob.glob(os.path.join(labelme_path, '*.json')))
org_json_file_names = [i.split("\\")[-1].split(".json")[0] for i in org_json_files]
org_img_files = sorted(glob.glob(os.path.join(labelme_path, '*.jpg')))
org_img_file_names = [i.split("\\")[-1].split(".jpg")[0] for i in org_img_files]

# 4.labelme file to voc dataset
for i, json_file_ in enumerate(org_json_files):
    json_file = json.load(open(json_file_, "r", encoding="utf-8"))
    image_path = os.path.join(labelme_path, org_json_file_names[i]+'.jpg')
    img = cv2.imread(image_path)
    height, width, channels = img.shape
    dst_image_path = os.path.join(dst_image_dir, "{:06d}.jpg".format(i))
    cv2.imwrite(dst_image_path, img)
    dst_annotation_path = os.path.join(dst_annotation_dir, '{:06d}.xml'.format(i))
    with codecs.open(dst_annotation_path, "w", "utf-8") as xml:
        xml.write('<annotation>\n')
        xml.write('\t<folder>' + 'Pin_detection' + '</folder>\n')
        xml.write('\t<filename>' + "{:06d}.jpg".format(i) + '</filename>\n')
        # xml.write('\t<source>\n')
        # xml.write('\t\t<database>The UAV autolanding</database>\n')
        # xml.write('\t\t<annotation>UAV AutoLanding</annotation>\n')
        # xml.write('\t\t<image>flickr</image>\n')
        # xml.write('\t\t<flickrid>NULL</flickrid>\n')
        # xml.write('\t</source>\n')
        # xml.write('\t<owner>\n')
        # xml.write('\t\t<flickrid>NULL</flickrid>\n')
        # xml.write('\t\t<name>ChaojieZhu</name>\n')
        # xml.write('\t</owner>\n')
        xml.write('\t<size>\n')
        xml.write('\t\t<width>' + str(width) + '</width>\n')
        xml.write('\t\t<height>' + str(height) + '</height>\n')
        xml.write('\t\t<depth>' + str(channels) + '</depth>\n')
        xml.write('\t</size>\n')
        xml.write('\t\t<segmented>0</segmented>\n')
        for multi in json_file["shapes"]:
            points = np.array(multi["points"])
            xmin = min(points[:, 0])
            xmax = max(points[:, 0])
            ymin = min(points[:, 1])
            ymax = max(points[:, 1])
            label = multi["label"]
            # skip degenerate boxes with zero width or height
            if xmax <= xmin:
                pass
            elif ymax <= ymin:
                pass
            else:
                xml.write('\t<object>\n')
                xml.write('\t\t<name>' + label + '</name>\n')
                xml.write('\t\t<pose>Unspecified</pose>\n')
                xml.write('\t\t<truncated>1</truncated>\n')
                xml.write('\t\t<difficult>0</difficult>\n')
                xml.write('\t\t<bndbox>\n')
                xml.write('\t\t\t<xmin>' + str(xmin) + '</xmin>\n')
                xml.write('\t\t\t<ymin>' + str(ymin) + '</ymin>\n')
                xml.write('\t\t\t<xmax>' + str(xmax) + '</xmax>\n')
                xml.write('\t\t\t<ymax>' + str(ymax) + '</ymax>\n')
                xml.write('\t\t</bndbox>\n')
                xml.write('\t</object>\n')
                print(json_file_, xmin, ymin, xmax, ymax, label)
        xml.write('</annotation>')

# 5.split files for txt
train_file = os.path.join(dst_main_dir, 'train.txt')
trainval_file = os.path.join(dst_main_dir, 'trainval.txt')
val_file = os.path.join(dst_main_dir, 'val.txt')
test_file = os.path.join(dst_main_dir, 'test.txt')

ftrain = open(train_file, 'w')
ftrainval = open(trainval_file, 'w')
fval = open(val_file, 'w')
ftest = open(test_file, 'w')

total_annotation_files = glob.glob(os.path.join(dst_annotation_dir, "*.xml"))
total_annotation_names = [i.split("\\")[-1].split(".xml")[0] for i in total_annotation_files]

# test_filepath = ""
for file in total_annotation_names:
    ftrainval.writelines(file + '\n')
# test
# for file in os.listdir(test_filepath):
#    ftest.write(file.split(".jpg")[0] + "\n")
# split
train_files, val_files = train_test_split(total_annotation_names, test_size=0.2)
# train
for file in train_files:
    ftrain.write(file + '\n')
# val
for file in val_files:
    fval.write(file + '\n')

ftrainval.close()
ftrain.close()
fval.close()
ftest.close()
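
Optionally (my own addition, not part of the original script), you can spot-check that one of the generated annotation files is well-formed VOC XML by parsing it with Python's standard library; this reuses dst_annotation_dir from the script above and assumes at least one image was converted:

import xml.etree.ElementTree as ET

sample_xml = os.path.join(dst_annotation_dir, "000000.xml")  # first converted file
root = ET.parse(sample_xml).getroot()   # the <annotation> element
print(root.find("filename").text)       # e.g. 000000.jpg
for obj in root.findall("object"):
    box = obj.find("bndbox")
    print(obj.find("name").text,
          box.find("xmin").text, box.find("ymin").text,
          box.find("xmax").text, box.find("ymax").text)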

To use the script, you only need to:
1. Set your own input path in labelme_path = "D:\\PinInspection\\jpgretanglelabel" (the original labelme annotation data path). The jpgretanglelabel folder must contain both the original images and the annotation .json files, with matching names (one annotation .json file per original image); a quick check for this is sketched after these notes.
2. Set your own output path in saved_path = "D:\\PinInspection\\improveto300\\VOC2007" (the save path).
With that, the problem is solved~
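
As a quick sanity check before running the script (again my own suggestion, assuming the folder layout described in point 1), you can verify that every .json annotation in labelme_path has a matching .jpg image:

import glob
import os

labelme_path = "D:\\PinInspection\\jpgretanglelabel"  # same input folder as above

json_names = {os.path.splitext(os.path.basename(p))[0]
              for p in glob.glob(os.path.join(labelme_path, "*.json"))}
jpg_names = {os.path.splitext(os.path.basename(p))[0]
             for p in glob.glob(os.path.join(labelme_path, "*.jpg"))}

missing = sorted(json_names - jpg_names)
if missing:
    print("Annotations without a matching image:", missing)
else:
    print("All", len(json_names), "annotations have a matching image.")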
