Traditional detection algorithms work in stages: first guess a large number of candidate boxes, then extract features (feature vectors) with hand-crafted feature engineering, and finally train with classical machine-learning tools. This complex pipeline tends to introduce a lot of noise and lose a lot of information.
From the traditional approach we can summarize object detection as two stages: candidate-box generation, then regression and classification.
YOLOv1 is a one-stage object detection algorithm.
The YOLOv1 algorithm pipeline:
YOLOv1 in summary:
1. Advantages:
One-stage, so very fast
2. Drawbacks:
Poor in crowded scenes: the 7×7 grid is too coarse (each cell detects only one object), so dense objects are handled badly (a sketch of the output layout follows this list)
Poor at detecting small objects
Poor on objects with large shape variation
No batch normalization (BN)
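To make the one-object-per-cell limitation concrete, here is a minimal sketch of the YOLOv1 output layout, assuming the paper's settings (S=7, B=2, C=20); the code is purely illustrative:

import numpy as np

# YOLOv1 predicts an S x S x (B*5 + C) tensor: B boxes of (x, y, w, h, conf)
# per cell plus ONE class distribution per cell, shared by all of its boxes.
S, B, C = 7, 2, 20
pred = np.zeros((S, S, B * 5 + C))
print(pred.shape)                               # (7, 7, 30)

boxes = pred[..., :B * 5].reshape(S, S, B, 5)   # per-cell box predictions
class_scores = pred[..., B * 5:]                # per-cell class scores

Because the class scores live at the cell level, two objects of different classes falling into the same cell cannot both be predicted.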
YOLOv2 improvements:
1. Introduce BN
2. A higher-precision classifier: YOLOv1 divides the image into a 7×7 grid (unfriendly to dense objects), while YOLOv2 uses a 13×13 grid
3. Multi-scale training
- With the FC layers removed, the network can accept inputs of any size
- Training sizes run from 320×320 and 352×352 up to 608×608, in multiples of 32 (see the sketch below)
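A minimal sketch of how multi-scale training can be implemented; the YOLOv2 paper picks a new input size every 10 batches, but the loop below is illustrative rather than the original training code:

import random

# Candidate input sizes: multiples of 32 from 320 to 608, as in YOLOv2.
sizes = [320 + 32 * i for i in range(10)]       # 320, 352, ..., 608

input_size = sizes[0]
for batch_idx in range(100):                    # stand-in for the training loop
    if batch_idx % 10 == 0:                     # re-sample the size every 10 batches
        input_size = random.choice(sizes)
    # every image in this batch would be resized to (input_size, input_size)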
4. Fine-grained features
- Shallow features are routed directly to deep layers (shallow features mainly capture object edges; deep features mainly capture semantic information)
- A new "reorg" layer (i.e. a "slice" layer) is introduced (later abandoned, then reintroduced in YOLOv7); a sketch of the operation follows
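A minimal sketch of what reorg (space-to-depth) does, assuming the common stride-2 variant: each 2×2 spatial block is stacked along the channel axis, e.g. 26×26×512 → 13×13×2048, so shallow detail can be concatenated with the 13×13 deep feature map:

import numpy as np

def reorg(x, stride=2):
    # x: feature map of shape (H, W, C) -> (H/stride, W/stride, C*stride^2)
    h, w, c = x.shape
    x = x.reshape(h // stride, stride, w // stride, stride, c)
    x = x.transpose(0, 2, 1, 3, 4)       # gather each stride x stride block together
    return x.reshape(h // stride, w // stride, c * stride * stride)

shallow = np.zeros((26, 26, 512), dtype=np.float32)
print(reorg(shallow).shape)              # (13, 13, 2048)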
5. The first use of anchors in the YOLO series (very important)
- What is an anchor?
- Why use anchors?
The anchor mechanism
What an anchor means:
- A preset, virtual bounding box
- Predicted boxes are obtained by regressing from the anchors
YOLOv3 improvements:
1. New network modules
2. Multi-scale structure
3. Multi-class loss
softmax loss (one-hot) -> logistic regression loss (an independent binary classifier per class)
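A minimal sketch of the difference, assuming PyTorch: softmax cross-entropy forces the classes to be mutually exclusive, while per-class logistic regression (binary cross-entropy on sigmoid outputs) lets one box carry several labels at once:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 20)                      # 4 boxes, 20 classes (illustrative)

# Softmax loss: exactly one class per box (target is a class index / one-hot).
target_idx = torch.randint(0, 20, (4,))
softmax_loss = F.cross_entropy(logits, target_idx)

# Logistic-regression loss: each class is an independent yes/no decision,
# so one box can be positive for several classes simultaneously.
multi_target = torch.zeros(4, 20)
multi_target[0, [0, 3]] = 1.0                    # box 0 carries two labels
bce_loss = F.binary_cross_entropy_with_logits(logits, multi_target)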
The yolo-head carries the anchors; in other words, what the yolo-head actually outputs is the offsets between the ground-truth boxes and the anchors.
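A minimal sketch of how such offsets are decoded back into a box, following the YOLOv2/v3 parameterization ((cx, cy) is the grid cell, (pw, ph) the anchor size; the function name here is illustrative):

import numpy as np

def decode_box(tx, ty, tw, th, cx, cy, pw, ph):
    sigmoid = lambda v: 1.0 / (1.0 + np.exp(-v))
    bx = sigmoid(tx) + cx        # sigmoid keeps the center inside its grid cell
    by = sigmoid(ty) + cy
    bw = pw * np.exp(tw)         # the anchor size is scaled exponentially
    bh = ph * np.exp(th)
    return bx, by, bw, bh

# Raw outputs near zero decode to roughly the anchor itself, centered in cell (3, 5).
print(decode_box(0.0, 0.0, 0.0, 0.0, cx=3, cy=5, pw=1.2, ph=2.1))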
Later YOLO versions improve along four directions:
1. Data improvements (various data augmentation schemes)
2. Network architecture improvements
Activation functions
Regularization improvements
Modern designs: decoupled heads and a third branch
3. Loss function improvements
Focal Loss
IoU / GIoU / ... (see the sketch after this list)
4. Subsequent YOLO developments
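A minimal sketch of the IoU-family regression losses named above: GIoU adds a penalty based on the smallest enclosing box, which still gives a gradient when the two boxes do not overlap (later variants such as DIoU/CIoU refine this further):

import numpy as np

def iou_and_giou(a, b):
    # a, b: boxes as [x1, y1, x2, y2]
    inter_w = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    inter_h = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = inter_w * inter_h
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    iou = inter / union
    # Smallest axis-aligned box that encloses both a and b.
    enclose = (max(a[2], b[2]) - min(a[0], b[0])) * (max(a[3], b[3]) - min(a[1], b[1]))
    giou = iou - (enclose - union) / enclose
    return iou, giou

iou, giou = iou_and_giou([0, 0, 2, 2], [1, 1, 3, 3])
print(1 - iou, 1 - giou)     # the corresponding IoU loss and GIoU loss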
voctoyolo: convert VOC XML annotations into a YOLO-style txt list
import os
import xml.etree.ElementTree as ET


def convert(size, box):
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[1]) / 2.0
    y = (box[2] + box[3]) / 2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)


def xml2txt(xml_file, f, classes):
    """
    xml_file: the XML annotation file
    f: open file handle; the parsed boxes are appended to it
    classes: list of all class names
    """
    # Parse the XML file
    tree = ET.parse(xml_file)
    root = tree.getroot()
    # Read the image width and height
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    # Iterate over the objects in the XML
    for obj in root.iter('object'):
        difficult = obj.find('difficult').text  # used below to skip difficult samples
        cls = obj.find('name').text  # object class name
        # Skip classes that do not need converting, and difficult samples
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)  # class index, starting from 0
        xmlbox = obj.find('bndbox')  # annotation as [xmin, ymin, xmax, ymax]
        b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('ymin').text)),
             int(float(xmlbox.find('xmax').text)), int(float(xmlbox.find('ymax').text)))  # string -> float -> int
        # b = convert((w, h), b)  # convert to YOLO format [xcenter, ycenter, box_w, box_h]
        f.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))
    f.write('\n')


if __name__ == '__main__':
    classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair',
               'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
               'tvmonitor']

    def create_txt(img_dir, xml_dir, txt_file, classes):
        img_total = os.listdir(img_dir)
        with open(txt_file, mode='w', encoding='utf-8') as f:
            for img in img_total:
                if not img.endswith('jpg'):
                    continue
                img_path = os.path.join(img_dir, img)
                xml_path = os.path.join(xml_dir, img.split('.')[0] + '.xml')
                f.write(img_path)
                xml2txt(xml_path, f, classes)

    create_txt(img_dir='./VOCdevkit/VOC2007/JPEGImages', xml_dir='./VOCdevkit/VOC2007/Annotations',
               txt_file='./train.txt', classes=classes)
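For reference, each line of the generated train.txt holds an image path followed by space-separated boxes in absolute pixel coordinates with the class index last, e.g. (path and values illustrative):

./VOCdevkit/VOC2007/JPEGImages/000005.jpg 263,211,324,339,8 165,264,253,372,8

Uncommenting the convert((w, h), b) call would write normalized YOLO-format [x_center, y_center, box_w, box_h] values instead.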
import os
import shutil

# Move the image matching each annotation file from ./1/ to ./2/.
label_total = os.listdir('./Anonations')
for label in label_total:
    print(label)
    label_name = label.split('.')[0]
    shutil.move(os.path.join('./1/', label_name + '.bmp'), os.path.join('./2/', label_name + '.bmp'))
import os
import cv2
import xml.etree.ElementTree as ET

# Draw the annotated boxes on each image to visually check the labels.
annoation = os.listdir('./Anonations')
for label in annoation:
    name = label.split('.')[0]
    image_name = os.path.join('image', name + '.bmp')
    image = cv2.imread(image_name)
    annoation_path = os.path.join('./Anonations', label)
    # Extract the boxes from the XML and draw them
    tree = ET.parse(annoation_path)
    root = tree.getroot()
    for obj in root.iter('object'):
        cls = obj.find('name').text
        print('class:', cls)
        xmlbox = obj.find('bndbox')  # annotation as [xmin, ymin, xmax, ymax]
        b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('ymin').text)),
             int(float(xmlbox.find('xmax').text)), int(float(xmlbox.find('ymax').text)))
        cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), (255, 255, 255), thickness=1)
        cv2.putText(image, text=cls, org=(b[0], b[1]), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.5, color=(255, 0, 0))
    cv2.imshow('image', image)
    cv2.waitKey(0)
import random
import os

# Generate train.txt and val.txt
random.seed(8888)
# --------------------- change these to your own paths ---------------------
xml_dir = './annotations'  # annotation directory
img_dir = './images'       # image directory
# ---------------------------------------------------------------------------
path_list = list()
for img in os.listdir(img_dir):
    img_path = os.path.join(img_dir, img)
    xml_path = os.path.join(xml_dir, img.replace('jpg', 'xml'))  # swap the image suffix for xml
    path_list.append((img_path, xml_path))
random.shuffle(path_list)
# Keep only a small subset for testing; the full dataset is too heavy for a CPU
test_data_length = 50
path_list = path_list[:test_data_length]
ratio = 0.9
# --------------------- change these to your own paths ---------------------
train_f = open('./train.txt', 'w')  # training list
val_f = open('./val.txt', 'w')      # validation list
# ---------------------------------------------------------------------------
for i, content in enumerate(path_list):
    img, xml = content
    text = img + ' ' + xml + '\n'
    if i < len(path_list) * ratio:
        train_f.write(text)
    else:
        val_f.write(text)
train_f.close()
val_f.close()
# Generate the label file
label = ['speedlimit', 'crosswalk', 'trafficlight']  # classes you want to detect
# --------------------- change the label_list path to your own ---------------------
with open('./label_list.txt', 'w', encoding='utf-8') as f:
    for text in label:
        f.write(text + '\n')
Reference: "[Object Detection] Converting XML annotation files to txt format (VOC annotations to YOLO txt)", 悠悠青青's blog on CSDN.
When an image is transformed, its ground-truth boxes must be transformed in the same way.
import cv2
import numpy as np
import torch
from PIL import Image
from torch.utils.data.dataset import Dataset


# ---------------------------------------------------------#
#   Convert the image to RGB so grayscale images do not
#   break prediction. Only RGB images are supported; every
#   other mode is converted to RGB.
# ---------------------------------------------------------#
def cvtColor(image):
    if len(np.shape(image)) == 3 and np.shape(image)[2] == 3:
        return image
    else:
        image = image.convert('RGB')
        return image


def preprocess_input(image):
    image /= 255.0
    return image


class YoloDataset(Dataset):
    def __init__(self, annotation_lines, input_shape=[416, 416], num_classes=20, train=False):
        super(YoloDataset, self).__init__()
        self.annotation_lines = annotation_lines
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.length = len(self.annotation_lines)
        self.train = train

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        index = index % self.length
        # ---------------------------------------------------#
        #   Apply random augmentation during training,
        #   no augmentation during validation.
        # ---------------------------------------------------#
        image, box = self.get_random_data(self.annotation_lines[index], self.input_shape[0:2], random=self.train)
        image = np.transpose(preprocess_input(np.array(image, dtype=np.float32)), (2, 0, 1))
        box = np.array(box, dtype=np.float32)
        if len(box) != 0:
            # Normalize to [0, 1] and convert [x1, y1, x2, y2] -> [cx, cy, w, h]
            box[:, [0, 2]] = box[:, [0, 2]] / self.input_shape[1]
            box[:, [1, 3]] = box[:, [1, 3]] / self.input_shape[0]
            box[:, 2:4] = box[:, 2:4] - box[:, 0:2]
            box[:, 0:2] = box[:, 0:2] + box[:, 2:4] / 2
        return image, box

    def rand(self, a=0, b=1):
        return np.random.rand() * (b - a) + a

    def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=0.7, val=0.4, random=True):
        line = annotation_line.split()
        # ------------------------------#
        #   Read the image and convert it to RGB
        # ------------------------------#
        image = Image.open(line[0])
        image = cvtColor(image)
        # ------------------------------#
        #   Original size vs. model input size
        # ------------------------------#
        iw, ih = image.size  # width and height of the original image
        h, w = input_shape   # height and width expected by the model
        # ------------------------------#
        #   Parse the ground-truth boxes
        # ------------------------------#
        box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])

        if not random:
            # random=False, i.e. validation: no augmentation needed
            scale = min(w / iw, h / ih)  # the input differs from the model size, so rescale, keeping the aspect ratio
            nw = int(iw * scale)
            nh = int(ih * scale)
            dx = (w - nw) // 2  # aspect-preserving resize can leave margins; // 2 pads both sides equally
            dy = (h - nh) // 2
            # ---------------------------------#
            #   Pad the leftover area with gray bars
            # ---------------------------------#
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image, np.float32)
            # ---------------------------------#
            #   Adjust the ground-truth boxes accordingly
            # ---------------------------------#
            if len(box) > 0:
                np.random.shuffle(box)
                box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
                box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
                box[:, 0:2][box[:, 0:2] < 0] = 0
                box[:, 2][box[:, 2] > w] = w
                box[:, 3][box[:, 3] > h] = h
                box_w = box[:, 2] - box[:, 0]
                box_h = box[:, 3] - box[:, 1]
                box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid boxes
            return image_data, box

        # ------------------------------------------#
        #   Randomly scale the image and distort its aspect ratio
        # ------------------------------------------#
        new_ar = iw / ih * self.rand(1 - jitter, 1 + jitter) / self.rand(1 - jitter, 1 + jitter)
        scale = self.rand(.25, 2)
        if new_ar < 1:
            nh = int(scale * h)
            nw = int(nh * new_ar)
        else:
            nw = int(scale * w)
            nh = int(nw / new_ar)
        image = image.resize((nw, nh), Image.BICUBIC)
        # ------------------------------------------#
        #   Pad the leftover area with gray bars
        # ------------------------------------------#
        dx = int(self.rand(0, w - nw))
        dy = int(self.rand(0, h - nh))
        new_image = Image.new('RGB', (w, h), (128, 128, 128))
        new_image.paste(image, (dx, dy))
        image = new_image
        # ------------------------------------------#
        #   Random horizontal flip
        # ------------------------------------------#
        flip = self.rand() < .5
        if flip:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
        image_data = np.array(image, np.uint8)
        # ---------------------------------#
        #   Color-space augmentation:
        #   sample the HSV gain factors
        # ---------------------------------#
        r = np.random.uniform(-1, 1, 3) * [hue, sat, val] + 1
        # ---------------------------------#
        #   Convert the image to HSV
        # ---------------------------------#
        hue, sat, val = cv2.split(cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV))
        dtype = image_data.dtype
        # ---------------------------------#
        #   Apply the gains through lookup tables
        # ---------------------------------#
        x = np.arange(0, 256, dtype=r.dtype)
        lut_hue = ((x * r[0]) % 180).astype(dtype)
        lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
        lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
        image_data = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
        image_data = cv2.cvtColor(image_data, cv2.COLOR_HSV2RGB)
        # ---------------------------------#
        #   Adjust the ground-truth boxes accordingly
        # ---------------------------------#
        if len(box) > 0:
            np.random.shuffle(box)
            box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
            box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
            if flip:
                box[:, [0, 2]] = w - box[:, [2, 0]]
            box[:, 0:2][box[:, 0:2] < 0] = 0
            box[:, 2][box[:, 2] > w] = w
            box[:, 3][box[:, 3] > h] = h
            box_w = box[:, 2] - box[:, 0]
            box_h = box[:, 3] - box[:, 1]
            box = box[np.logical_and(box_w > 1, box_h > 1)]
        return image_data, box


if __name__ == '__main__':
    with open('train.txt', mode='r', encoding='utf-8') as f:
        annotation_lines = f.readlines()
    print(annotation_lines)
    dataset = YoloDataset(annotation_lines=annotation_lines)
    image_data, box = dataset[0]
    print(image_data.shape, box)
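Since each image can carry a different number of boxes, PyTorch's default collate cannot stack the targets into one tensor. A minimal sketch of a custom collate_fn for use with DataLoader (the function name and batching scheme are illustrative, not part of the original code):

import numpy as np
import torch
from torch.utils.data import DataLoader

def yolo_collate_fn(batch):
    # Stack the images into one tensor; keep the per-image box arrays as a list.
    images = torch.from_numpy(np.stack([item[0] for item in batch]))
    boxes = [torch.from_numpy(item[1]) for item in batch]
    return images, boxes

# loader = DataLoader(dataset, batch_size=4, shuffle=True, collate_fn=yolo_collate_fn)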
# -------------------------------------------------------------------------------------------------------#
#   Although k-means clusters the boxes in the dataset, many datasets contain boxes of similar size,
#   so the 9 clusters come out close to one another, which actually hurts training: different feature
#   layers suit different anchor sizes (the smaller the feature map, the larger the anchors it suits).
#   The network's default anchors are already split across small/medium/large scales, and work very
#   well without any clustering.
# -------------------------------------------------------------------------------------------------------#
import glob
import xml.etree.ElementTree as ET

import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm


def cas_iou(box, cluster):
    # IoU between one (w, h) box and every cluster center, with all boxes
    # imagined as sharing the same top-left corner.
    x = np.minimum(cluster[:, 0], box[0])
    y = np.minimum(cluster[:, 1], box[1])
    intersection = x * y
    area1 = box[0] * box[1]
    area2 = cluster[:, 0] * cluster[:, 1]
    iou = intersection / (area1 + area2 - intersection)
    return iou


def avg_iou(box, cluster):
    return np.mean([np.max(cas_iou(box[i], cluster)) for i in range(box.shape[0])])


def kmeans(box, k):
    # -------------------------------------------------------------#
    #   Total number of boxes
    # -------------------------------------------------------------#
    row = box.shape[0]
    # -------------------------------------------------------------#
    #   Distance from every box to every cluster center
    # -------------------------------------------------------------#
    distance = np.empty((row, k))
    # -------------------------------------------------------------#
    #   Cluster assignment from the previous iteration
    # -------------------------------------------------------------#
    last_clu = np.zeros((row,))
    np.random.seed()
    # -------------------------------------------------------------#
    #   Pick k boxes at random as the initial cluster centers
    # -------------------------------------------------------------#
    cluster = box[np.random.choice(row, k, replace=False)]

    iter = 0
    while True:
        # -------------------------------------------------------------#
        #   Distance = 1 - IoU between each box and each center
        # -------------------------------------------------------------#
        for i in range(row):
            distance[i] = 1 - cas_iou(box[i], cluster)
        # -------------------------------------------------------------#
        #   Assign each box to its nearest center
        # -------------------------------------------------------------#
        near = np.argmin(distance, axis=1)
        if (last_clu == near).all():
            break
        # -------------------------------------------------------------#
        #   Move each center to the median of its boxes
        # -------------------------------------------------------------#
        for j in range(k):
            cluster[j] = np.median(box[near == j], axis=0)  # near == j selects the boxes of cluster j
        last_clu = near
        if iter % 5 == 0:
            print('iter: {:d}. avg_iou:{:.2f}'.format(iter, avg_iou(box, cluster)))
        iter += 1
    return cluster, near  # cluster: final centers; near: cluster index of every box


def load_data(path):
    data = []
    # -------------------------------------------------------------#
    #   Collect the boxes from every xml file
    # -------------------------------------------------------------#
    for xml_file in tqdm(glob.glob('{}/*xml'.format(path))):
        tree = ET.parse(xml_file)
        height = int(tree.findtext('./size/height'))
        width = int(tree.findtext('./size/width'))
        if height <= 0 or width <= 0:
            continue
        # -------------------------------------------------------------#
        #   Record the width and height of every object, as a ratio
        # -------------------------------------------------------------#
        for obj in tree.iter('object'):
            xmin = np.float64(int(float(obj.findtext('bndbox/xmin'))) / width)
            ymin = np.float64(int(float(obj.findtext('bndbox/ymin'))) / height)
            xmax = np.float64(int(float(obj.findtext('bndbox/xmax'))) / width)
            ymax = np.float64(int(float(obj.findtext('bndbox/ymax'))) / height)
            data.append([xmax - xmin, ymax - ymin])
    return np.array(data)


if __name__ == '__main__':
    np.random.seed(0)
    input_shape = [200, 200]  # target image size; used to scale the relative w,h back to pixels
    anchors_num = 9
    # -------------------------------------------------------------#
    #   Load the dataset; VOC-style xml files can be used directly
    # -------------------------------------------------------------#
    path = './anonations'
    # -------------------------------------------------------------#
    #   Load every xml; boxes are stored as relative width,height
    # -------------------------------------------------------------#
    print('Load xmls.')
    data = load_data(path)
    print('Load xmls done.')
    # -------------------------------------------------------------#
    #   Run k-means
    # -------------------------------------------------------------#
    print('K-means boxes.')
    cluster, near = kmeans(data, anchors_num)
    print('K-means boxes done.')
    data = data * np.array([input_shape[1], input_shape[0]])
    cluster = cluster * np.array([input_shape[1], input_shape[0]])
    # -------------------------------------------------------------#
    #   Plot the clusters
    # -------------------------------------------------------------#
    for j in range(anchors_num):
        plt.scatter(data[near == j][:, 0], data[near == j][:, 1])         # all boxes, colored by cluster
        plt.scatter(cluster[j][0], cluster[j][1], marker='x', c='black')  # cluster centers
    plt.savefig("kmeans_for_anchors.jpg")
    plt.show()
    print('Save kmeans_for_anchors.jpg in root dir.')

    cluster = cluster[np.argsort(cluster[:, 0] * cluster[:, 1])]  # sort anchors by area, small to large
    print('avg_ratio:{:.2f}'.format(avg_iou(data, cluster)))
    print('cluster centers:', cluster)

    f = open("yolo_anchors.txt", 'w')
    row = np.shape(cluster)[0]
    for i in range(row):
        if i == 0:
            x_y = "%d,%d" % (cluster[i][0], cluster[i][1])
        else:
            x_y = ", %d,%d" % (cluster[i][0], cluster[i][1])
        f.write(x_y)
    f.close()
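The resulting yolo_anchors.txt holds all nine anchors on a single line as comma-separated width,height pairs, sorted by area from small to large, e.g. (values illustrative only):

10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326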