本文我们将使用 sahi 库对高分辨率图片进行切分。开始之前,请确保使用的是 COCO 格式的数据集;如果是 VOC 格式,请参考我上一篇文章先完成格式转换。
sahi 是一个针对超大图片中小目标检测的切片辅助推理库(Slicing Aided Hyper Inference)。
SAHI的Github地址: https://github.com/obss/sahi
新建一个py文件,将下述代码复制进去
import matplotlib.pyplot as plt
import json
import numpy as np
import argparse
def median(data):
    """Return the median of a non-empty sequence of numbers.

    Sorts a copy instead of sorting in place, so the caller's list is
    left untouched (the original implementation mutated ``data``, which
    silently reordered ``ratio_w``/``ratio_h`` at the call site).

    Raises:
        IndexError: if ``data`` is empty.
    """
    ordered = sorted(data)
    mid = len(ordered) // 2
    # ordered[~mid] mirrors ordered[mid] from the end: for odd lengths both
    # indices hit the middle element, for even lengths they straddle it.
    return (ordered[mid] + ordered[~mid]) / 2
def draw_distribution(width, height, out_path):
    """Plot side-by-side histograms of width and height ratios.

    Args:
        width: list of width ratios (already scaled by 1000 by the caller).
        height: list of height ratios (already scaled by 1000 by the caller).
        out_path: file path the figure is saved to before being shown.
    """
    # Aim for ~10-unit-wide bins; clamp to at least 1 bin — the original
    # could pass bins=0 to plt.hist (and crash) when all values fell
    # within a 10-unit span.
    w_bins = max(int((max(width) - min(width)) // 10), 1)
    h_bins = max(int((max(height) - min(height)) // 10), 1)
    plt.figure()
    plt.subplot(221)
    plt.hist(width, bins=w_bins, color='green')
    plt.xlabel('Width rate *1000')
    plt.ylabel('number')
    plt.title('Distribution of Width')
    plt.subplot(222)
    plt.hist(height, bins=h_bins, color='blue')
    plt.xlabel('Height rate *1000')
    plt.title('Distribution of Height')
    # Save before show(): show() may clear the figure in some backends.
    plt.savefig(out_path)
    print(f'Distribution saved as {out_path}')
    plt.show()
def get_ratio_infos(jsonfile, out_img):
    """Compute per-image mean bbox width/height relative to image size.

    Walks the COCO annotations grouped by consecutive ``image_id`` runs,
    averages the box widths/heights of each image, divides by the image
    dimensions, then prints the median ratios and draws their
    distribution via ``draw_distribution``.

    Args:
        jsonfile: path to a COCO-format annotation JSON.
        out_img: output path for the distribution figure.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original leaked it via json.load(open(...))).
    with open(jsonfile, 'r') as f:
        allannjson = json.load(f)
    be_im_id = 1
    be_im_w = []
    be_im_h = []
    ratio_w = []
    ratio_h = []
    images = allannjson['images']
    for i, ann in enumerate(allannjson['annotations']):
        if ann['iscrowd']:
            continue
        x0, y0, w, h = ann['bbox'][:]
        if be_im_id == ann['image_id']:
            # Same image as the previous annotation: keep accumulating.
            be_im_w.append(w)
            be_im_h.append(h)
        else:
            # Image changed: flush the accumulated boxes for the previous
            # image, then start a new accumulation with the current box.
            # NOTE(review): assumes images[be_im_id - 1] is the entry whose
            # 'id' equals be_im_id, i.e. image ids are 1..N in list order,
            # and that annotations are sorted by image_id — confirm for
            # datasets not exported in that canonical order.
            im_w = images[be_im_id - 1]['width']
            im_h = images[be_im_id - 1]['height']
            im_m_w = np.mean(be_im_w)
            im_m_h = np.mean(be_im_h)
            dis_w = im_m_w / im_w
            dis_h = im_m_h / im_h
            ratio_w.append(dis_w)
            ratio_h.append(dis_h)
            be_im_id = ann['image_id']
            be_im_w = [w]
            be_im_h = [h]
    # Flush the final image's accumulated boxes.
    im_w = images[be_im_id - 1]['width']
    im_h = images[be_im_id - 1]['height']
    im_m_w = np.mean(be_im_w)
    im_m_h = np.mean(be_im_h)
    dis_w = im_m_w / im_w
    dis_h = im_m_h / im_h
    ratio_w.append(dis_w)
    ratio_h.append(dis_h)
    mid_w = median(ratio_w)
    mid_h = median(ratio_h)
    # Scale by 1000 so the histogram bins (width ~10 units) are meaningful.
    ratio_w = [i * 1000 for i in ratio_w]
    ratio_h = [i * 1000 for i in ratio_h]
    print(f'Median of ratio_w is {mid_w}')
    print(f'Median of ratio_h is {mid_h}')
    print('all_img with box: ', len(ratio_h))
    print('all_ann: ', len(allannjson['annotations']))
    draw_distribution(ratio_w, ratio_h, out_img)
def main():
    """CLI entry point: parse arguments and report bbox ratio statistics."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--json_path', type=str, default=None, help="Dataset json path.")
    parser.add_argument(
        '--out_img',
        type=str,
        default='box_distribution.jpg',
        # Fixed typo in help text: "distibution" -> "distribution".
        help="Name of distribution img.")
    args = parser.parse_args()
    get_ratio_infos(args.json_path, args.out_img)


if __name__ == "__main__":
    main()
打开终端,输入下述语句,其中json_path需要指定对应的coco格式json
python box_distributed.py --json_path ./val2017.json
运行后,终端中会输出检测框相对应于原图的比例,当这个比例小于0.04并且原图尺寸大于1500的时候,则需要进行切图处理。
pip install sahi
新建一个py文件,将下述代码复制进去
import argparse
from tqdm import tqdm
def slice_data(image_dir, dataset_json_path, output_dir, slice_size,
               overlap_ratio):
    """Slice a COCO dataset's images (and annotations) into tiles via sahi.

    Args:
        image_dir: folder containing the source images.
        dataset_json_path: path to the COCO annotation JSON.
        output_dir: destination folder for sliced images and JSON.
        slice_size: tile side length in pixels.
        overlap_ratio: fractional overlap between adjacent tiles.

    Raises:
        RuntimeError: if the ``sahi`` package cannot be imported.
    """
    try:
        from sahi.scripts.slice_coco import slice
    except Exception as e:
        # Chain the original import error (`from e`) so the real cause
        # stays visible in the traceback; the original discarded it.
        raise RuntimeError(
            'Unable to use sahi to slice images, please install sahi, for example: `pip install sahi`, see https://github.com/obss/sahi'
        ) from e
    tqdm.write(
        f" slicing for slice_size={slice_size}, overlap_ratio={overlap_ratio}")
    slice(
        image_dir=image_dir,
        dataset_json_path=dataset_json_path,
        output_dir=output_dir,
        slice_size=slice_size,
        overlap_ratio=overlap_ratio, )
def main():
    """Command-line entry point: collect slicing options and run slice_data."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--image_dir', type=str, default=None, help="The image folder path.")
    cli.add_argument(
        '--json_path', type=str, default=None, help="Dataset json path.")
    cli.add_argument(
        '--output_dir', type=str, default=None, help="Output dir.")
    cli.add_argument(
        '--slice_size', type=int, default=500, help="slice_size")
    cli.add_argument(
        '--overlap_ratio', type=float, default=0.25, help="overlap_ratio")
    opts = cli.parse_args()
    # Forward every option straight through to the worker function.
    slice_data(opts.image_dir, opts.json_path, opts.output_dir,
               opts.slice_size, opts.overlap_ratio)


if __name__ == "__main__":
    main()
终端运行脚本
python slice_image.py --image_dir ./dataset/data/train2017/ --json_path ./dataset/data/annotations/instances_train2017.json --output_dir ./dataset/data/sliced --slice_size 640 --overlap_ratio 0.25
切分后的子图文件夹与json标注文件共同保存在sliced文件夹下,这个文件夹下的文件就是未来的训练验证集