Preface
All of the datasets follow the directory layout shown below (using ICDAR2015 as an example):
cd icdar2015
Directory layout:
|-- results
|   |-- result_0.jpg
|-- train
|   |-- gt
|   |   |-- CIMG0005_convert.txt
|   `-- img
|       |-- CIMG0005_convert.jpg
|-- test
|   |-- gt
|   `-- img
|-- train.txt
`-- test.txt
ICDAR2015
wget https://rrc.cvc.uab.es/downloads/ch4_training_images.zip
wget https://rrc.cvc.uab.es/downloads/ch4_training_localization_transcription_gt.zip
wget https://rrc.cvc.uab.es/downloads/ch4_test_images.zip
wget https://rrc.cvc.uab.es/downloads/Challenge4_Test_Task1_GT.zip
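After the archives are downloaded, they have to be unpacked into the layout shown above. This is a minimal sketch of my own, assuming the four archive names above and that each archive extracts as a flat list of files; adjust the target paths if your copy differs.

import os
import zipfile

# Assumed mapping from each downloaded archive to its target directory
# in the layout shown above.
archives = {
    'ch4_training_images.zip': os.path.join('train', 'img'),
    'ch4_training_localization_transcription_gt.zip': os.path.join('train', 'gt'),
    'ch4_test_images.zip': os.path.join('test', 'img'),
    'Challenge4_Test_Task1_GT.zip': os.path.join('test', 'gt'),
}

for zip_name, target_dir in archives.items():
    os.makedirs(target_dir, exist_ok=True)
    with zipfile.ZipFile(zip_name) as zf:
        zf.extractall(target_dir)
    print(f'extracted {zip_name} -> {target_dir}')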
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os, sys
import numpy as np
icdar2015_root_dir = os.getcwd()
icdar2015_train_img_dir = os.path.join(icdar2015_root_dir, 'train', 'img')
icdar2015_train_gt_dir = os.path.join(icdar2015_root_dir, 'train', 'gt')
icdar2015_test_img_dir = os.path.join(icdar2015_root_dir, 'test', 'img')
icdar2015_test_gt_dir = os.path.join(icdar2015_root_dir, 'test', 'gt')
print(f'icdar2015_root_dir:{icdar2015_root_dir}')
print(f'icdar2015_train_img_dir:{icdar2015_train_img_dir}')
print(f'icdar2015_train_gt_dir:{icdar2015_train_gt_dir}')
print(f'icdar2015_test_img_dir:{icdar2015_test_img_dir}')
print(f'icdar2015_test_gt_dir:{icdar2015_test_gt_dir}')
print('*'*80)
with open('train.txt', 'w') as f:
    imgs = os.listdir(icdar2015_train_img_dir)
    print(f'write train images:{len(imgs)}')
    for img in imgs:
        img_path = os.path.join(icdar2015_train_img_dir, img)
        gt_name = 'gt_' + img.replace('jpg', 'txt')
        gt_path = os.path.join(icdar2015_train_gt_dir, gt_name)
        f.write(img_path + '\t' + gt_path + '\n')

with open('test.txt', 'w') as f:
    imgs = os.listdir(icdar2015_test_img_dir)
    print(f'write test images:{len(imgs)}')
    for img in imgs:
        img_path = os.path.join(icdar2015_test_img_dir, img)
        gt_name = 'gt_' + img.replace('jpg', 'txt')
        gt_path = os.path.join(icdar2015_test_gt_dir, gt_name)
        f.write(img_path + '\t' + gt_path + '\n')

print('****************** generate label list done ******************')
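A quick way to make sure the generated index files are usable is to read them back and check that every listed image has a matching ground-truth file. This check is my own addition, not part of the original script:

import os

# Read the index back and count pairs whose image or gt file is missing.
with open('train.txt', 'r') as f:
    pairs = [line.rstrip('\n').split('\t') for line in f if line.strip()]

missing = [(img, gt) for img, gt in pairs
           if not (os.path.exists(img) and os.path.exists(gt))]
print(f'{len(pairs)} pairs listed, {len(missing)} with missing files')
for img, gt in missing[:5]:
    print('missing:', img, gt)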
#!/usr/bin/env python
# coding=utf-8
"""
@Author: xinyi61
@Date: 2020-02-23 14:32:59
@LastEditors: xinyi61
@LastEditTime: 2020-02-24 16:22:44
@Email: [email protected]
@Version: 1.0
@Description:
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os, sys
import random
import numpy as np
import matplotlib.pyplot as plt
import cv2
def get_annotation(label_path: str) -> tuple:
    boxes = []
    text_tags = []
    with open(label_path, encoding="utf-8", mode="r") as f:
        for line in f.readlines():
            # strip a possible UTF-8 BOM before splitting: 8 coordinates, then the transcription
            params = line.strip().strip("\ufeff").strip("\xef\xbb\xbf").split(",")
            try:
                label = params[8]
                if label == "*" or label == "###":
                    text_tags.append(False)  # "do not care" region
                else:
                    text_tags.append(True)
                x1, y1, x2, y2, x3, y3, x4, y4 = list(map(float, params[:8]))
                boxes.append([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
            except Exception:
                print("load label failed on {}".format(label_path))
    return np.array(boxes, dtype=np.float32), np.array(text_tags, dtype=bool)
if __name__ == "__main__":
    color = [255, 0, 0]  # line color in OpenCV's BGR order
    thickness = 1
    result = []
    with open("train.txt", "r") as f:
        lines = f.readlines()
    random.shuffle(lines)
    for line in lines[0:10]:
        line_info = line.rstrip("\n").split("\t")
        image_path = line_info[0]
        label_path = line_info[1]
        points, text_tags = get_annotation(label_path)
        img = cv2.imread(image_path)
        for i in range(len(points)):
            if not text_tags[i]:
                continue  # skip "do not care" regions
            point = points[i].astype(int)
            cv2.line(img, tuple(point[0]), tuple(point[1]), color, thickness)
            cv2.line(img, tuple(point[1]), tuple(point[2]), color, thickness)
            cv2.line(img, tuple(point[2]), tuple(point[3]), color, thickness)
            cv2.line(img, tuple(point[3]), tuple(point[0]), color, thickness)
        result.append(img[:, :, ::-1])

    os.makedirs("./results", exist_ok=True)  # make sure the output directory exists
    i = 1
    for k in range(len(result)):
        try:
            plt.ion()
            plt.figure(i)
            cv2.imwrite("./results/result_{}.jpg".format(k), result[k])
            plt.imshow(result[k][:, :, ::-1])
            plt.pause(1)
        except Exception:
            pass
        finally:
            i += 1
    print("************* done ***************")
ICDAR2017
wget http://datasets.cvc.uab.es/rrc/ch8_training_images_1.zip
wget http://datasets.cvc.uab.es/rrc/ch8_training_images_2.zip
wget http://datasets.cvc.uab.es/rrc/ch8_training_images_3.zip
wget http://datasets.cvc.uab.es/rrc/ch8_training_images_4.zip
wget http://datasets.cvc.uab.es/rrc/ch8_training_images_5.zip
wget http://datasets.cvc.uab.es/rrc/ch8_training_images_6.zip
wget http://datasets.cvc.uab.es/rrc/ch8_training_images_7.zip
wget http://datasets.cvc.uab.es/rrc/ch8_training_images_8.zip
wget http://datasets.cvc.uab.es/rrc/ch8_training_localization_transcription_gt_v2.zip
wget https://rrc.cvc.uab.es/downloads/ch8_validation_images.zip
wget http://datasets.cvc.uab.es/rrc/ch8_validation_localization_transcription_gt_v2.zip
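The MLT 2017 training images come in eight parts that all need to end up in the same train/img directory. The sketch below is my own; it assumes the archive names above and uses the validation split as test/, since that is what the index script further down expects:

import os
import zipfile

# Assumed layout: merge the eight training image parts into train/img,
# put the ground truth in train/gt, and use the validation split as test/.
targets = [(f'ch8_training_images_{part}.zip', os.path.join('train', 'img'))
           for part in range(1, 9)]
targets += [
    ('ch8_training_localization_transcription_gt_v2.zip', os.path.join('train', 'gt')),
    ('ch8_validation_images.zip', os.path.join('test', 'img')),
    ('ch8_validation_localization_transcription_gt_v2.zip', os.path.join('test', 'gt')),
]

for zip_name, target_dir in targets:
    os.makedirs(target_dir, exist_ok=True)
    with zipfile.ZipFile(zip_name) as zf:
        zf.extractall(target_dir)
    print(f'extracted {zip_name} -> {target_dir}')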
Some images in the MLT 2017 set are in GIF format and need to be converted to PNG first; for example, for img_401.gif:

from PIL import Image

gif_filename = 'img_401.gif'
# Image.open() loads only the first frame of an animated GIF by default
im = Image.open(gif_filename)
try:
    while True:
        # save the current frame as img_401.png (later frames overwrite earlier ones)
        current = im.tell()
        im.save('img_401.png')
        # advance to the next frame; raises EOFError after the last one
        im.seek(current + 1)
except EOFError:
    pass
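To convert every GIF under the training image directory in one pass, here is a small batch sketch of my own (it assumes the train/img layout used above and keeps only the first frame; the glob pattern and the decision to delete the original .gif are my choices, not part of the original write-up):

import glob
import os
from PIL import Image

# Hypothetical batch conversion: turn each .gif under train/img into a .png
# of its first frame so that the index script below only sees jpg/png files.
for gif_path in glob.glob(os.path.join('train', 'img', '*.gif')):
    png_path = os.path.splitext(gif_path)[0] + '.png'
    with Image.open(gif_path) as im:
        im.convert('RGB').save(png_path)  # first frame only
    os.remove(gif_path)  # drop the original so it is not listed later
    print(f'{gif_path} -> {png_path}')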
Step 3: Generate the data index files train.txt and test.txt
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os, sys
import numpy as np
icdar2017_root_dir = os.getcwd()
icdar2017_train_img_dir = os.path.join(icdar2017_root_dir, 'train', 'img')
icdar2017_train_gt_dir = os.path.join(icdar2017_root_dir, 'train', 'gt')
icdar2017_test_img_dir = os.path.join(icdar2017_root_dir, 'test', 'img')
icdar2017_test_gt_dir = os.path.join(icdar2017_root_dir, 'test', 'gt')
print(f'icdar2017_root_dir:{icdar2017_root_dir}')
print(f'icdar2017_train_img_dir:{icdar2017_train_img_dir}')
print(f'icdar2017_train_gt_dir:{icdar2017_train_gt_dir}')
print(f'icdar2017_test_img_dir:{icdar2017_test_img_dir}')
print(f'icdar2017_test_gt_dir:{icdar2017_test_gt_dir}')
print('*'*80)
with open('train.txt', 'w') as f:
    imgs = os.listdir(icdar2017_train_img_dir)
    print(f'write train images:{len(imgs)}')
    for img in imgs:
        img_path = os.path.join(icdar2017_train_img_dir, img)
        if img_path.endswith('jpg'):
            gt_name = 'gt_' + img.replace('jpg', 'txt')
        elif img_path.endswith('png'):
            gt_name = 'gt_' + img.replace('png', 'txt')
        else:
            continue  # skip files that are neither jpg nor png
        gt_path = os.path.join(icdar2017_train_gt_dir, gt_name)
        f.write(img_path + '\t' + gt_path + '\n')

with open('test.txt', 'w') as f:
    imgs = os.listdir(icdar2017_test_img_dir)
    print(f'write test images:{len(imgs)}')
    for img in imgs:
        img_path = os.path.join(icdar2017_test_img_dir, img)
        if img.endswith('jpg'):
            gt_name = 'gt_' + img.replace('jpg', 'txt')
        elif img.endswith('png'):
            gt_name = 'gt_' + img.replace('png', 'txt')
        else:
            continue  # skip files that are neither jpg nor png
        gt_path = os.path.join(icdar2017_test_gt_dir, gt_name)
        f.write(img_path + '\t' + gt_path + '\n')

print('****************** generate label list done ******************')
Step 4: Test
The test (visualization) code is the same as for ICDAR2015.
ICDAR2019
wget http://datasets.cvc.uab.es/rrc/ImagesPart1.zip
wget http://datasets.cvc.uab.es/rrc/ImagesPart2.zip
wget http://datasets.cvc.uab.es/rrc/train_gt_t13.zip
wget http://datasets.cvc.uab.es/rrc/MLT19_TestImagesPart1.zip
wget http://datasets.cvc.uab.es/rrc/MLT19_TestImagesPart2.zip
Note that this task only requires localization results (as indicated in the results format on the tasks page), but the ground truth also provides the script id of each bounding box and the transcription. This extra information will be needed in Tasks 3 and 4.
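Assuming each ground-truth line uses the same quadrilateral format as above (x1,y1,...,x4,y4) followed by the script id and the transcription, a minimal parser could look like this; treat it as an illustrative sketch rather than a reference implementation:

from typing import List, Tuple

def parse_mlt19_gt(gt_path: str) -> List[Tuple[List[float], str, str]]:
    """Parse one MLT19-style gt file into (box, script, transcription) tuples.

    Assumes each line is: x1,y1,x2,y2,x3,y3,x4,y4,script,transcription
    (the transcription itself may contain commas, hence maxsplit below).
    """
    entries = []
    with open(gt_path, encoding='utf-8-sig') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            parts = line.split(',', 9)  # keep commas inside the transcription
            if len(parts) < 10:
                continue
            box = list(map(float, parts[:8]))
            script, transcription = parts[8], parts[9]
            entries.append((box, script, transcription))
    return entries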
Extra information about the training set (useful for researchers who focus on only one or a few of the languages rather than the full multilingual set): the 10,000 training images are ordered so that each consecutive block of 1,000 images contains text mainly in one language (it may of course also contain text from one or two other languages, all from the set of ten), as listed below; see the filtering sketch after the list.
- 00001 - 01000: Arabic
- 01001 - 02000: English
- 02001 - 03000: French
- 03001 - 04000: Chinese
- 04001 - 05000: German
- 05001 - 06000: Korean
- 06001 - 07000: Japanese
- 07001 - 08000: Italian
- 08001 - 09000: Bangla
- 09001 - 10000: Hindi
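Given that ordering, restricting the training set to a single language is just a matter of filtering by image index. The sketch below is my own; the index ranges come from the list above, while the filename pattern is an assumption (adjust the digit extraction if your copy is named differently):

import os
import re

# Index ranges from the list above (inclusive), keyed by language.
LANGUAGE_RANGES = {
    'Arabic': (1, 1000), 'English': (1001, 2000), 'French': (2001, 3000),
    'Chinese': (3001, 4000), 'German': (4001, 5000), 'Korean': (5001, 6000),
    'Japanese': (6001, 7000), 'Italian': (7001, 8000), 'Bangla': (8001, 9000),
    'Hindi': (9001, 10000),
}

def select_language(img_dir: str, language: str):
    """Return image filenames whose numeric index falls in the language's range.

    Assumes the index is the last run of digits in the filename
    (e.g. tr_img_00042.jpg -> 42); adjust if your copy is named differently.
    """
    lo, hi = LANGUAGE_RANGES[language]
    selected = []
    for name in sorted(os.listdir(img_dir)):
        digits = re.findall(r'\d+', os.path.splitext(name)[0])
        if digits and lo <= int(digits[-1]) <= hi:
            selected.append(name)
    return selected

# Example: collect the Chinese subset (images 03001 - 04000).
# chinese_imgs = select_language(os.path.join('train', 'img'), 'Chinese')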