This is the GitHub download address:
git clone https://github.com/ultralytics/yolov5
If the download fails, use the Gitee mirror below:
git clone https://gitee.com/pursuit_zhangyu/yolov5-pytorch
conda create -n yolov5 python=3.8
# activate the python3.8 virtual environment
source activate yolov5
# note: pick a cudatoolkit version that matches your GPU driver and the PyTorch build you install (torch>=1.7 needs CUDA 10.1 or newer, so 9.0 here will usually need to be raised)
conda install cudatoolkit=9.0 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/linux-64/
pip install -U -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple
requirements.txt pulls in many dependencies; if the bulk install does not finish cleanly, install whatever is missing individually, for example:
(Quote the version specifiers so the shell does not treat ">=" as output redirection.)
pip install "matplotlib>=3.2.2" -i https://mirrors.aliyun.com/pypi/simple
pip install "numpy>=1.18.5" -i https://mirrors.aliyun.com/pypi/simple
pip install "opencv-python>=4.1.2" -i https://mirrors.aliyun.com/pypi/simple
pip install Pillow -i https://mirrors.aliyun.com/pypi/simple
pip install "PyYAML>=5.3" -i https://mirrors.aliyun.com/pypi/simple
pip install "scipy>=1.4.1" -i https://mirrors.aliyun.com/pypi/simple
pip install "tensorboard>=2.2" -i https://mirrors.aliyun.com/pypi/simple
pip install "torch>=1.7.0" -i https://mirrors.aliyun.com/pypi/simple
pip install "torchvision>=0.8.1" -i https://mirrors.aliyun.com/pypi/simple
pip install "tqdm>=4.41.0" -i https://mirrors.aliyun.com/pypi/simple
pip install pandas -i https://mirrors.aliyun.com/pypi/simple
pip install seaborn -i https://mirrors.aliyun.com/pypi/simple
pip install "pycocotools>=2.0" -i https://mirrors.aliyun.com/pypi/simple
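With the dependencies installed, a quick sanity check (a minimal sketch, no project-specific assumptions) confirms that PyTorch and CUDA are visible inside the yolov5 environment:
import torch
import torchvision

print("torch:", torch.__version__)
print("torchvision:", torchvision.__version__)
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("GPU count:", torch.cuda.device_count())
    print("GPU 0:", torch.cuda.get_device_name(0))
Then run the repo's own detection demo to confirm the install: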
python detect.py --source data/images --weights yolov5m.pt --conf 0.25
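As an alternative smoke test, the repo's torch.hub entry point can be used. This sketch assumes network access to fetch the pretrained weights and uses the sample image shipped with the repo:
import torch

# downloads yolov5m weights on first use; requires network access
model = torch.hub.load("ultralytics/yolov5", "yolov5m", pretrained=True)
results = model("data/images/zidane.jpg")   # sample image shipped with the repo
results.print()                             # per-class detection summary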
Create data/my_lables_3.py. It splits the dataset into training set : test set = 8 : 2 and handles the three classes.
# coding=utf-8
import os
import xml.etree.ElementTree as ET

# the three classes in this dataset
classes = ["person", "helmet_on", "helmet_off"]


def convert(size, box):
    # convert a VOC box (xmin, xmax, ymin, ymax) to normalized YOLO (x, y, w, h)
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[1]) / 2.0 - 1
    y = (box[2] + box[3]) / 2.0 - 1
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)


def convert_annotation(file_name, label_path):
    # parse one VOC xml file and write the corresponding YOLO label file
    in_file = open(file_name, encoding="utf-8")
    out_file = open(label_path, 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    for obj in root.iter('object'):
        if obj.find("name") is None:
            continue
        cls = obj.find('name').text
        if cls not in classes:
            continue
        print("cls:", cls)
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
             float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
    in_file.close()
    out_file.close()


def convert_dataset(image_dir, xml_dir, train_txt_path, test_txt_path, out_label_path):
    # split the images 8:2 into train/test lists and convert every annotation
    print("enter convert")
    if not os.path.exists(out_label_path):
        os.makedirs(out_label_path)
    train_txt = open(train_txt_path, "w")
    test_txt = open(test_txt_path, "w")
    image_list = os.listdir(image_dir)
    for i, image_name in enumerate(image_list):
        if ".jpg" in image_name:
            print(image_name)
            label_name = image_name.replace(".jpg", ".txt")
            xml_name = image_name.replace(".jpg", ".xml")
            label_path = os.path.join(out_label_path, label_name)
            image_path = os.path.join(image_dir, image_name)
            xml_path = os.path.join(xml_dir, xml_name)
            # every 5th image goes to the test set (train : test = 8 : 2)
            if i % 5 != 0:
                train_txt.write(image_path + "\n")
            else:
                test_txt.write(image_path + "\n")
            convert_annotation(xml_path, label_path)
    train_txt.close()
    test_txt.close()


if __name__ == "__main__":
    image_dir = "JPEGImages"   # absolute paths are safer here
    xml_dir = "Annotations"
    out_label_path = "labels"
    train_txt_path = "train_3.txt"
    test_txt_path = "test_3.txt"
    convert_dataset(image_dir, xml_dir, train_txt_path, test_txt_path, out_label_path)
This produces a labels folder and two files, train_3.txt and test_3.txt: labels holds the per-image label files, train_3.txt lists the training image paths, and test_3.txt lists the test image paths.
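To spot-check the conversion before training, a small script like the following (hypothetical, not part of the repo; it assumes the relative paths used above) confirms that every listed image has a label file and shows the box count per class:
import os
from collections import Counter

def check(txt_path, label_dir="labels"):
    counts = Counter()
    missing = 0
    with open(txt_path) as f:
        image_paths = [line.strip() for line in f if line.strip()]
    for image_path in image_paths:
        label_path = os.path.join(label_dir, os.path.basename(image_path).replace(".jpg", ".txt"))
        if not os.path.exists(label_path):
            missing += 1
            continue
        with open(label_path) as lf:
            for line in lf:
                counts[int(line.split()[0])] += 1   # first field is the class id
    print(txt_path, "images:", len(image_paths), "missing labels:", missing, "boxes per class:", dict(counts))

check("train_3.txt")
check("test_3.txt")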
cd data
cp coco.yaml helmets.yaml   # name it to match the --data argument used in the training command below
vim helmets.yaml
# Helmet dataset (adapted from coco.yaml)
# Train command: python train.py --data helmets.yaml

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
# replace /xx/ with the actual directory that holds the txt files generated above
train: /xx/train_3.txt
val: /xx/test_3.txt
test: /xx/test_3.txt

# number of classes (edit to match your dataset)
nc: 3

# class names (edit to match your dataset)
names: [ 'person', 'helmet_on', 'helmet_off' ]
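Before launching training, it is worth loading the edited yaml once to confirm it parses and that nc matches the number of names (a minimal sketch; the filename follows the helmets.yaml copy made above):
import yaml

with open("data/helmets.yaml") as f:
    d = yaml.safe_load(f)

assert d["nc"] == len(d["names"]), "nc must equal the number of class names"
for i, name in enumerate(d["names"]):
    print(i, name)
print("train:", d["train"])
print("val:", d["val"])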
cd models
cp yolov5s.yaml helmets.yaml
vim helmets.yaml
My dataset has three classes, so nc is set to 3.
# parameters
nc: 3  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple

# anchors
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
   [-1, 9, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
   [-1, 1, SPP, [1024, [5, 9, 13]]],
   [-1, 3, C3, [1024, False]],  # 9
  ]
# YOLOv5 head
# note: the Detect layer takes [nc, anchors] and computes the 3*(nc+5) output size itself,
# so only nc at the top of this file needs to change; the head channels keep their yolov5s defaults
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
   [-1, 3, C3, [512, False]],  # 13

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 14], 1, Concat, [1]],  # cat head P4
   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 10], 1, Concat, [1]],  # cat head P5
   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)

   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
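A quick way to catch mistakes in the model yaml is to build it once before training. This sketch assumes it is run from the yolov5 root and that models/yolo.py exposes the Model class as in the v4.x codebase:
import torch
from models.yolo import Model

model = Model("models/helmets.yaml", ch=3, nc=3)   # builds the network and logs the layer table
x = torch.zeros(1, 3, 640, 640)
preds = model(x)   # in training mode the Detect layer returns one tensor per scale (P3, P4, P5)
for p in preds:
    print(p.shape)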
Because the image folder is named JPEGImages rather than images, the label-path lookup in utils/datasets.py needs a small change:
vim utils/datasets.py
# sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
sa, sb = os.sep + 'JPEGImages' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
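The edit matters because YOLOv5 derives each label path from the image path by substituting that folder name; roughly, the mapping works like this (sketch only, with a hypothetical image path):
import os

sa, sb = os.sep + "JPEGImages" + os.sep, os.sep + "labels" + os.sep
image_path = "/data/helmet/JPEGImages/0001.jpg"   # hypothetical example
label_path = image_path.replace(sa, sb, 1).rsplit(".", 1)[0] + ".txt"
print(label_path)   # -> /data/helmet/labels/0001.txt
Then start training: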
python train.py --data ./data/helmets.yaml --cfg ./models/helmets.yaml --batch-size 64 --device 0,1,2,3
Run it in the background:
nohup python train.py --data ./data/helmets.yaml --cfg ./models/helmets.yaml --batch-size 64 --device 0,1,2,3 &
Open nohup.out to check training progress.
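Besides opening nohup.out directly, a tiny helper (hypothetical, not part of the repo) prints just the last lines of the log:
from collections import deque

with open("nohup.out", errors="ignore") as f:
    for line in deque(f, maxlen=20):   # keep only the final 20 lines
        print(line.rstrip())
After training, evaluate the last checkpoint: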
python test.py --weights runs/train/exp3/weights/last.pt --data ./data/helmets.yaml --device 0,1,2,3
Install the export dependencies:
pip install "onnx>=1.8.0" -i https://mirrors.aliyun.com/pypi/simple
pip install coremltools==4.0 -i https://mirrors.aliyun.com/pypi/simple
Run:
python models/export.py --weights runs/train/exp3/weights/last.pt --img 640 --batch 1
Result:
Starting ONNX export with onnx 1.8.1...
ONNX export success, saved as runs/train/exp3/weights/last.onnx
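To verify the exported file, load it with the onnx package and run the checker (a minimal sketch using the path printed above):
import onnx

model = onnx.load("runs/train/exp3/weights/last.onnx")
onnx.checker.check_model(model)   # raises an exception if the graph is malformed
print("inputs:", [i.name for i in model.graph.input])
print("outputs:", [o.name for o in model.graph.output])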