The YOLOv3 code is forked from https://github.com/eriklindernoren/PyTorch-YOLOv3
The COCO dataset needs to be downloaded separately.
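Before running the extraction in Step 2, it is worth confirming that the downloaded data matches the directory layout the script expects. The snippet below is only a sketch for checking that assumption, using the same dataDir='/coco/' and val2014 split that the Step 2 script uses:

import os

# Quick layout check (a sketch, not part of the extraction script).
# The Step 2 script expects:
#   <dataDir>/images/val2014/                     - the downloaded images
#   <dataDir>/annotations/instances_val2014.json  - the instance annotations
dataDir = '/coco/'
dataset = 'val2014'
expected = [
    os.path.join(dataDir, 'images', dataset),
    os.path.join(dataDir, 'annotations', 'instances_%s.json' % dataset),
]
for path in expected:
    print(path, 'exists' if os.path.exists(path) else 'MISSING')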
Step 2 Extract the images and annotation information
First, run the code below to extract the images of the required classes from the original COCO dataset. The variables that need to be modified are (an example follows this list):
savepath
datasets_list
classes_names
dataDir
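For example (a minimal sketch; the paths here are hypothetical and differ from the values hard-coded in the script below), the four variables might be adapted like this:

savepath = "/home/user/coco_class/"       # hypothetical output root for the extracted images and XML files
datasets_list = ['train2014', 'val2014']  # which COCO splits to process
classes_names = ["person", "car"]         # keep only these COCO categories
dataDir = "/home/user/coco/"              # hypothetical root of the downloaded COCO dataset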
from pycocotools.coco import COCO
import os
import shutil
from tqdm import tqdm
import skimage.io as io
import matplotlib.pyplot as plt
import cv2
from PIL import Image, ImageDraw
# the path you want to save your results for coco to voc
savepath = "/coco_class/"
img_dir = savepath + 'images/val2014/'
anno_dir = savepath + 'Annotations/val2014/'
# datasets_list = ['train2014', 'val2014']
# datasets_list = ['train2014']
datasets_list = ['val2014']

classes_names = ["person", "bicycle", "car", "motorbike", "bus", "truck"]

# Store annotations and train2014/val2014/... in this folder
dataDir = '/coco/'
headstr = """\
<annotation>
    <folder>VOC</folder>
    <filename>%s</filename>
    <source>
        <database>My Database</database>
        <annotation>COCO</annotation>
        <image>flickr</image>
        <flickrid>NULL</flickrid>
    </source>
    <size>
        <width>%d</width>
        <height>%d</height>
        <depth>%d</depth>
    </size>
    <segmented>0</segmented>
"""

objstr = """\
    <object>
        <name>%s</name>
        <bndbox>
            <xmin>%d</xmin>
            <ymin>%d</ymin>
            <xmax>%d</xmax>
            <ymax>%d</ymax>
        </bndbox>
    </object>
"""

tailstr = '''\
</annotation>
'''

# if the dir does not exist, create it; otherwise delete it and recreate it
def mkr(path):
    if os.path.exists(path):
        shutil.rmtree(path)
        os.mkdir(path)
    else:
        os.mkdir(path)

mkr(img_dir)
mkr(anno_dir)

# map category id -> category name
def id2name(coco):
    classes = dict()
    for cls in coco.dataset['categories']:
        classes[cls['id']] = cls['name']
    return classes
def write_xml(anno_path, head, objs, tail):
    f = open(anno_path, "w")
    f.write(head)
    for obj in objs:
        f.write(objstr % (obj[0], obj[1], obj[2], obj[3], obj[4]))
    f.write(tail)
    f.close()

# eg: COCO_train2014_000000196610.jpg --> COCO_train2014_000000196610.xml
def save_annotations_and_imgs(coco, dataset, filename, objs):
    anno_path = anno_dir + filename[:-3] + 'xml'
    img_path = dataDir + 'images/' + dataset + '/' + filename
    # print(img_path)
    dst_imgpath = img_dir + filename

    img = cv2.imread(img_path)
    # print(img)
    if img.shape[2] == 1:
        print(filename + " not a RGB image")
        return
    shutil.copy(img_path, dst_imgpath)

    head = headstr % (filename, img.shape[1], img.shape[0], img.shape[2])
    tail = tailstr
    write_xml(anno_path, head, objs, tail)

def showimg(coco, dataset, img, classes, cls_id, show=True):
    global dataDir
    I = Image.open('%s/%s/%s/%s' % (dataDir, 'images', dataset, img['file_name']))
    # Get the annotation information by image ID
    annIds = coco.getAnnIds(imgIds=img['id'], catIds=cls_id, iscrowd=None)
    # print(annIds)
    anns = coco.loadAnns(annIds)
    # print(anns)
    # coco.showAnns(anns)
    objs = []
    for ann in anns:
        class_name = classes[ann['category_id']]
        if class_name in classes_names:
            print(class_name)
            if 'bbox' in ann:
                # COCO bbox format is [x, y, width, height]; convert to VOC [xmin, ymin, xmax, ymax]
                bbox = ann['bbox']
                xmin = int(bbox[0])
                ymin = int(bbox[1])
                xmax = int(bbox[2] + bbox[0])
                ymax = int(bbox[3] + bbox[1])
                obj = [class_name, xmin, ymin, xmax, ymax]
                objs.append(obj)
                draw = ImageDraw.Draw(I)
                draw.rectangle([xmin, ymin, xmax, ymax])
    if show:
        plt.figure()
        plt.axis('off')
        plt.imshow(I)
        plt.show()
    return objs
for dataset in datasets_list:
    # ./COCO/annotations/instances_train2014.json
    annFile = '{}/annotations/instances_{}.json'.format(dataDir, dataset)

    # COCO API for initializing annotated data
    coco = COCO(annFile)
    '''
    When the COCO object is created, the following information will be output:
        loading annotations into memory...
        Done (t=0.81s)
        creating index...
        index created!
    So far, the JSON file has been parsed and the images are associated with the corresponding annotated data.
    '''
    # show all classes in coco
    classes = id2name(coco)
    print(classes)
    # [1, 2, 3, 4, 6, 8]
    classes_ids = coco.getCatIds(catNms=classes_names)
    print(classes_ids)
    # exit()
    for cls in classes_names:
        # Get the ID number of this class
        cls_id = coco.getCatIds(catNms=[cls])
        img_ids = coco.getImgIds(catIds=cls_id)
        print(cls, len(img_ids))
        # imgIds = img_ids[0:10]
        for imgId in tqdm(img_ids):
            img = coco.loadImgs(imgId)[0]
            filename = img['file_name']
            # print(filename)
            objs = showimg(coco, dataset, img, classes, classes_ids, show=False)
            print(objs)
            save_annotations_and_imgs(coco, dataset, filename, objs)
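After the script finishes, a quick sanity check is to confirm that every copied image received a matching XML annotation file. This is only a sketch, assuming the savepath, img_dir, and anno_dir values from the script above:

import os

# Compare the extracted images against the generated VOC-style XML files
# (paths match img_dir / anno_dir as defined in the script above).
img_dir = "/coco_class/images/val2014/"
anno_dir = "/coco_class/Annotations/val2014/"
imgs = {f[:-4] for f in os.listdir(img_dir) if f.endswith('.jpg')}
annos = {f[:-4] for f in os.listdir(anno_dir) if f.endswith('.xml')}
print(len(imgs), 'images,', len(annos), 'annotation files')
print('images without an xml:', sorted(imgs - annos)[:10])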