Person Detection with pytorch_yolo_nano

Code: https://github.com/ardeal/yolo_nano
Check the PyTorch, CUDA and cuDNN versions:

import torch
print(torch.__version__)
print(torch.version.cuda)
print(torch.backends.cudnn.version())
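
If you also want to confirm that PyTorch can actually see the GPU, a quick check (a minimal sketch, assuming a CUDA build of PyTorch is installed) is:

import torch

# True only if a CUDA-capable GPU and a matching driver are available
print(torch.cuda.is_available())
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # name of the first GPU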

I. Prepare the training and test sets

1. Convert the COCO dataset to VOC format and keep only the classes you need (just adjust the paths):

from pycocotools.coco import COCO
import os
import shutil
from tqdm import tqdm
import skimage.io as io
import matplotlib.pyplot as plt
import cv2
from PIL import Image, ImageDraw

# Path where the converted VOC-style images and annotations are saved
# savepath="E:/PyTorch-YOLOv3-master/data/coco/train2017/"
# img_dir=savepath+'images/'
# anno_dir=savepath+'train_Annotations/'
# datasets_list=['train2017']

savepath="E:/PyTorch-YOLOv3-master/data/coco/val2017/"
img_dir=savepath+'images/'
anno_dir=savepath+'val_Annotations/'
datasets_list=['val2017']

classes_names = ['person']
# Directory that holds annotations/ and the train2017/val2017/... image folders
dataDir= 'E:/MS_COCO/'

headstr = """\
<annotation>
    <folder>VOC</folder>
    <filename>%s</filename>
    <source>
        <database>My Database</database>
        <annotation>COCO</annotation>
        <image>flickr</image>
        <flickrid>NULL</flickrid>
    </source>
    <owner>
        <flickrid>NULL</flickrid>
        <name>company</name>
    </owner>
    <size>
        <width>%d</width>
        <height>%d</height>
        <depth>%d</depth>
    </size>
    <segmented>0</segmented>
"""
objstr = """\
    <object>
        <name>%s</name>
        <pose>Unspecified</pose>
        <truncated>0</truncated>
        <difficult>0</difficult>
        <bndbox>
            <xmin>%d</xmin>
            <ymin>%d</ymin>
            <xmax>%d</xmax>
            <ymax>%d</ymax>
        </bndbox>
    </object>
"""

tailstr = '''\
</annotation>
'''

# If the directory already exists, delete it and recreate it; otherwise create it
def mkr(path):
    if os.path.exists(path):
        shutil.rmtree(path)
        os.mkdir(path)
    else:
        os.mkdir(path)
mkr(img_dir)
mkr(anno_dir)
def id2name(coco):
    classes=dict()
    for cls in coco.dataset['categories']:
        classes[cls['id']]=cls['name']
    return classes

def write_xml(anno_path, head, objs, tail):
    f = open(anno_path, "w")
    f.write(head)
    for obj in objs:
        f.write(objstr % (obj[0], obj[1], obj[2], obj[3], obj[4]))
    f.write(tail)
    f.close()


def save_annotations_and_imgs(coco,dataset,filename,objs):
    # e.g. COCO_train2014_000000196610.jpg --> COCO_train2014_000000196610.xml
    anno_path=anno_dir+filename[:-3]+'xml'
    img_path=dataDir+dataset+'/'+filename
#     print(img_path)
    dst_imgpath=img_dir+filename

    img = cv2.imread(img_path)
    if img is None or img.shape[2] == 1:
#         print(filename + " is not an RGB image")
        return
    shutil.copy(img_path, dst_imgpath)

    head=headstr % (filename, img.shape[1], img.shape[0], img.shape[2])
    tail = tailstr
    write_xml(anno_path,head, objs, tail)


def showimg(coco,dataset,img,classes,cls_id,show=True):
    global dataDir
    I=Image.open('%s/%s/%s'%(dataDir,dataset,img['file_name']))
    # Get the annotation ids for this image and the wanted categories
    annIds = coco.getAnnIds(imgIds=img['id'], catIds=cls_id, iscrowd=None)
    # print(annIds)
    anns = coco.loadAnns(annIds)
    # print(anns)
    # coco.showAnns(anns)
    objs = []
    for ann in anns:
        class_name=classes[ann['category_id']]
        if class_name in classes_names:
#             print(class_name)
            if 'bbox' in ann:
                bbox=ann['bbox']
                xmin = int(bbox[0])
                ymin = int(bbox[1])
                xmax = int(bbox[2] + bbox[0])
                ymax = int(bbox[3] + bbox[1])
                obj = [class_name, xmin, ymin, xmax, ymax]
                objs.append(obj)
                draw = ImageDraw.Draw(I)
                draw.rectangle([xmin, ymin, xmax, ymax], outline='red')
    if show:
        plt.figure()
        plt.axis('off')
        plt.imshow(I)
        plt.show()

    return objs

for dataset in datasets_list:
    #./COCO/annotations/instances_train2014.json
    annFile='{}/annotations/instances_{}.json'.format(dataDir,dataset)

    #COCO API for initializing annotated data
    coco = COCO(annFile)
    '''
    Once the COCO object has been created it prints:
    loading annotations into memory...
    Done (t=0.81s)
    creating index...
    index created!
    At this point the JSON file is fully parsed and each image is linked to its annotations.
    '''
    # show all classes in coco
    classes = id2name(coco)
    # print(classes)
    classes_ids = coco.getCatIds(catNms=classes_names)
    # print(classes_ids)  # e.g. [1] for 'person'
    for cls in classes_names:
        #Get ID number of this class
        cls_id=coco.getCatIds(catNms=[cls])
        img_ids=coco.getImgIds(catIds=cls_id)
        # print(cls, len(img_ids))
        # imgIds=img_ids[0:10]
        for imgId in tqdm(img_ids):
            img = coco.loadImgs(imgId)[0]
            filename = img['file_name']
            # print(filename)
            objs = showimg(coco, dataset, img, classes, classes_ids, show=False)
            # print(objs)
            save_annotations_and_imgs(coco, dataset, filename, objs)
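
After the script finishes, a quick sanity check (a minimal sketch; the paths assume the val2017 settings above) is to compare the number of copied images with the number of generated annotations and to parse one XML:

import os
import xml.etree.ElementTree as ET

img_dir = 'E:/PyTorch-YOLOv3-master/data/coco/val2017/images/'
anno_dir = 'E:/PyTorch-YOLOv3-master/data/coco/val2017/val_Annotations/'

imgs = [f for f in os.listdir(img_dir) if f.endswith('.jpg')]
xmls = [f for f in os.listdir(anno_dir) if f.endswith('.xml')]
print(len(imgs), 'images,', len(xmls), 'annotations')  # the two counts should match

# Inspect the person boxes recorded in the first annotation file
root = ET.parse(anno_dir + xmls[0]).getroot()
for obj in root.iter('object'):
    box = obj.find('bndbox')
    print(obj.find('name').text,
          box.find('xmin').text, box.find('ymin').text,
          box.find('xmax').text, box.find('ymax').text)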

2. Generate the training/test list and label files

import os
import random
import xml.etree.ElementTree as ET

train_percent = 1.0
# xmlfilepath = 'E:/PyTorch-YOLOv3-master/data/coco/train2017/train_Annotations/'
# txtsavepath = 'E:/PyTorch-YOLOv3-master/data/coco/train2017/train2017.txt'
xmlfilepath = 'E:/PyTorch-YOLOv3-master/data/coco/val2017/val_Annotations/'
txtsavepath = 'E:/PyTorch-YOLOv3-master/data/coco/val2017/val2017.txt'
total_xml = os.listdir(xmlfilepath)
  
num = len(total_xml)
indices = range(num)
tr = int(num * train_percent)
train = random.sample(indices, tr)

  
ftrain = open(txtsavepath, 'w')
# ftest = open('ImageSets/Main/test.txt', 'w')  
# ftrain = open('ImageSets/Main/train.txt', 'w')  
# fval = open('ImageSets/Main/val.txt', 'w')  
  
for i in indices:
    name = total_xml[i][:-4] + '\n'
    if i in train:
        ftrain.write(name)
#         if i in train:
#             ftrain.write(name)
#         else:
#             fval.write(name)
#     else:
#         ftest.write(name)

ftrain.close()
# ftrain.close()  
# fval.close()  
# ftest.close()

# sets=['train2017']  # replace with your own dataset
sets=['val2017']
classes = ["person"]     # change to your own classes
#classes = ["eye", "nose"]

def convert(size, box):
    dw = 1./(size[0])
    dh = 1./(size[1])
    x = (box[0] + box[1])/2.0 - 1
    y = (box[2] + box[3])/2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x*dw
    w = w*dw
    y = y*dh
    h = h*dh
    return (x,y,w,h)
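
# Example with hypothetical numbers: for a 640x480 image and the VOC box
# (xmin=100, xmax=200, ymin=120, ymax=240),
#     convert((640, 480), (100, 200, 120, 240))
# returns roughly (0.2328, 0.3729, 0.1563, 0.25): the normalized
# center x, center y, width and height that the YOLO label format expects.
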
def convert_annotation(image_id):
#     in_file = open('E:/PyTorch-YOLOv3-master/data/coco/train2017/train_Annotations/%s.xml' %(image_id))  # XML generated in step 1
#     out_file = open('E:/PyTorch-YOLOv3-master/data/coco/train2017/labels/%s.txt' %(image_id), 'w')
    in_file = open('E:/PyTorch-YOLOv3-master/data/coco/val2017/val_Annotations/%s.xml' %(image_id))  # XML generated in step 1
    out_file = open('E:/PyTorch-YOLOv3-master/data/coco/val2017/labels/%s.txt' %(image_id), 'w')
    tree=ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult)==1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w,h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
    in_file.close()
    out_file.close()
        
for image_set in sets:
    # make sure the labels/ output directory exists
    if not os.path.exists('E:/PyTorch-YOLOv3-master/data/coco/val2017/labels/'):
        os.makedirs('E:/PyTorch-YOLOv3-master/data/coco/val2017/labels/')
#     image_ids = open('E:/PyTorch-YOLOv3-master/data/coco/train2017/%s.txt'%(image_set)).read().strip().split()
#     list_file = open('E:/PyTorch-YOLOv3-master/data/coco/train2017/train.txt', 'w')
    image_ids = open('E:/PyTorch-YOLOv3-master/data/coco/val2017/%s.txt'%(image_set)).read().strip().split()
    list_file = open('E:/PyTorch-YOLOv3-master/data/coco/val2017/val.txt', 'w')
    for image_id in image_ids:
#         list_file.write('E:/PyTorch-YOLOv3-master/data/coco/train2017/images/%s.jpg\n'%(image_id))
        list_file.write('E:/PyTorch-YOLOv3-master/data/coco/val2017/images/%s.jpg\n'%(image_id))
        convert_annotation(image_id)
    list_file.close()   
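
Each line of a generated label file should hold the class index followed by the four normalized values produced by convert(). A quick spot check (a minimal sketch, assuming the val2017 paths used above):

import os

with open('E:/PyTorch-YOLOv3-master/data/coco/val2017/val.txt') as f:
    print(f.readline().strip())  # path of the first image

labels_dir = 'E:/PyTorch-YOLOv3-master/data/coco/val2017/labels/'
with open(labels_dir + os.listdir(labels_dir)[0]) as f:
    print(f.read())  # one line per object, e.g. "0 0.2328 0.3729 0.1563 0.25"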

II. Modify the network parameters

1. Edit config/coco.data and data/coco.names.
2. Adjust the parameters in opt.py (num_classes, batch_size, etc.).
3. If needed, change the output settings in network/yolo_nano_network.py:

self.num_classes = num_classes
self.image_size = image_size
self.num_anchors = 3
self.yolo_channels = (self.num_classes + 5) * self.num_anchors
anchors52 = [[10,13], [16,30], [33,23]] # 52x52
anchors26 = [[30,61], [62,45], [59,119]] # 26x26
anchors13 = [[116,90], [156,198], [373,326]] # 13x13
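
With a single 'person' class this gives (1 + 5) * 3 = 18 output channels per YOLO head: each of the 3 anchors predicts 4 box offsets, 1 objectness score, and num_classes class scores. As a quick check:

num_classes = 1  # person only
num_anchors = 3
print((num_classes + 5) * num_anchors)  # 18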

III. Train the network

python train_yolonano.py

IV. Problems encountered during training:

1. Index out of bounds

Traceback (most recent call last):
  File "train_yolonano.py", line 94, in <module>
    loss, outputs = model(imgs, targets)
  File "D:\Anaconda3\envs\pt\lib\site-packages\torch\nn\modules\module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "E:\Pytorch_yolo_nano-master\network\yolo_nano_network.py", line 137, in forward
    temp, layer_loss = self.yolo_layer52(out_conv9, targets, image_size)
  File "D:\Anaconda3\envs\pt\lib\site-packages\torch\nn\modules\module.py", line 547, in __call__
    result = self.forward(*input, **kwargs)
  File "E:\Pytorch_yolo_nano-master\network\basic_layers.py", line 197, in forward
    ignore_thres=self.ignore_thres,
  File "E:\Pytorch_yolo_nano-master\utils\common_funcs.py", line 303, in build_targets
    obj_mask[b, best_n, gj, gi] = 1
IndexError: index 52 is out of bounds for dimension 3 with size 52

Solution:
Edit E:\Pytorch_yolo_nano-master\utils\common_funcs.py and add a bounds check:

gx, gy = gxy.t()
gw, gh = gwh.t()
gi, gj = gxy.long().t()
# Clamp the grid cell indices into [0, nG - 1]; a ground-truth box that
# touches the image border can otherwise produce an index equal to nG
gi[gi < 0] = 0
gj[gj < 0] = 0
gi[gi > nG - 1] = nG - 1
gj[gj > nG - 1] = nG - 1
# Set masks
# print(b, best_n, gj, gi)
obj_mask[b, best_n, gj, gi] = 1
noobj_mask[b, best_n, gj, gi] = 0
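
Equivalently, the four boundary assignments collapse into torch.clamp (a sketch of the same fix, not the repository's code):

# clamp both grid indices into the valid range [0, nG - 1] in one call each
gi = gi.clamp(0, nG - 1)
gj = gj.clamp(0, nG - 1)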
