Implementing Mosaic Data Augmentation in Python


Mosaic data augmentation: four images are stitched into one new image, together with the bounding-box labels that correspond to it.

Main idea: take 4 images, apply random scaling, random cropping and random arrangement, and stitch them onto a single canvas as one training sample. YOLOv5 additionally applies an affine transform on top of this (the random_perspective function below).

Mosaic has the following advantages:
(1) It enriches the dataset and improves small-object detection and robustness to complex scenes: 4 images are chosen at random, randomly scaled, and randomly arranged before stitching, which diversifies the training data; the random scaling in particular produces many small objects, making the network more robust.
(2) It reduces the dependence on large mini-batches and lowers GPU memory requirements: the data of 4 images is processed at once, so a good result can be reached without a large mini-batch; stitching 4 images together effectively increases the batch size, and batch normalization statistics are likewise computed over all 4 images.

import cv2
import torch
import random
import os.path
import numpy as np
import matplotlib.pyplot as plt
from camvid import get_bbox, draw_box

def load_mosaic(im_files, names, name_color_dict):
    im_size = 640
    s_mosaic = im_size * 2
    mosaic_border = [-im_size // 2, -im_size // 2]
    labels4, segments4, colors = [], [], []
    # mosaic center (y_c, x_c), kept at least im_size/2 away from the canvas border
    y_c, x_c = (int(random.uniform(-x, s_mosaic + x)) for x in mosaic_border)
    
    img4 = np.full((s_mosaic, s_mosaic, 3), 114, dtype=np.uint8)
    seg4 = np.full((s_mosaic, s_mosaic), 0, dtype=np.uint8)
    
    for i, im_file in enumerate(im_files):
        # Load image
        img = cv2.imread(im_file)
        seg_file = im_file.replace('images', 'labels')
        name = os.path.basename(seg_file).split('.')[0]
        seg_file = os.path.join(os.path.dirname(seg_file), name + '_L.png')
        seg, boxes, color = get_bbox(seg_file, names, name_color_dict)
        colors += color
        h, w, _ = np.shape(img)
        
        # place img in img4: (x1a, y1a, x2a, y2a) index the mosaic canvas,
        # (x1b, y1b, x2b, y2b) index the source image
        if i == 0:  # top left
            x1a, y1a, x2a, y2a = max(x_c - w, 0), max(y_c - h, 0), x_c, y_c
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h 
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = x_c, max(y_c - h, 0), min(x_c + w, s_mosaic), y_c
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(x_c - w, 0), y_c, x_c, min(s_mosaic, y_c + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = x_c, y_c, min(x_c + w, s_mosaic), min(s_mosaic, y_c + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]
        
        # place seg in seg4
        seg4[y1a:y2a, x1a:x2a] = seg[y1b:y2b, x1b:x2b]
        
        # update bbox: padw/padh are the offsets of the source image's
        # top-left corner on the mosaic canvas (negative if the crop
        # removed part of the source image)
        padw = x1a - x1b
        padh = y1a - y1b
        boxes = xywhn2xyxy(boxes, padw=padw, padh=padh)
        labels4.append(boxes)
    labels4 = np.concatenate(labels4, 0)
    for x in labels4[:, 1:]:
        np.clip(x, 0, s_mosaic, out=x)  # clip coord
    
    # draw result
    draw_box(seg4, labels4, colors)
    
    return img4, labels4, seg4

if __name__ == '__main__':
    names = ['Pedestrian', 'Car', 'Truck_Bus']
    # name_color_dict is not defined in the original post; these are the
    # standard CamVid RGB label colors for the three classes (an assumption;
    # adjust to match your camvid helpers)
    name_color_dict = {'Pedestrian': (64, 64, 0),
                       'Car': (64, 0, 128),
                       'Truck_Bus': (192, 128, 192)}
    
    im_files = ['camvid/images/0016E5_01440.png',
                'camvid/images/0016E5_06600.png',
                'camvid/images/0006R0_f00930.png',
                'camvid/images/0006R0_f03390.png']

    load_mosaic(im_files, names, name_color_dict)
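
The helper xywhn2xyxy is called above but never defined or imported; YOLOv5 ships a function of this name in utils.general. A minimal sketch, assuming get_bbox returns rows of [cls, x_center, y_center, box_w, box_h] normalized to the source image (the class column is carried through unchanged, since labels4[:, 1:] is what gets clipped above):

def xywhn2xyxy(boxes, w=640, h=640, padw=0, padh=0):
    # convert normalized [cls, xc, yc, bw, bh] rows to [cls, x1, y1, x2, y2]
    # in mosaic-canvas pixels, shifted by the paste offset (padw, padh)
    out = boxes.astype(np.float32)
    out[:, 1] = w * (boxes[:, 1] - boxes[:, 3] / 2) + padw  # top-left x
    out[:, 2] = h * (boxes[:, 2] - boxes[:, 4] / 2) + padh  # top-left y
    out[:, 3] = w * (boxes[:, 1] + boxes[:, 3] / 2) + padw  # bottom-right x
    out[:, 4] = h * (boxes[:, 2] + boxes[:, 4] / 2) + padh  # bottom-right y
    return out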

Step 1: determine the mosaic center point

Step 2: work out where each image is placed on the canvas

Step 3: paste each image crop onto the canvas

Step 4: compute the offset each small image incurs on the large canvas, used to correct the label positions after the mosaic augmentation

Step 5: after the mosaic step, the four images are combined into a single canvas of shape [2*img_size, 2*img_size]

Step 6: apply random rotation, translation, scaling and cropping to the combined mosaic image

Step 7: resize to the network input size img_size (see the sketch below)
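
A minimal sketch of the explicit resize in step 7, assuming labels4 holds [cls, x1, y1, x2, y2] boxes in pixels of the 2*im_size canvas (YOLOv5 itself instead achieves the crop-back by passing border=mosaic_border to random_perspective, as shown further below):

def resize_mosaic(img4, labels4, im_size=640):
    # scale the 2*im_size mosaic canvas and its pixel-space boxes
    # down to the network input size
    h, w = img4.shape[:2]
    img = cv2.resize(img4, (im_size, im_size), interpolation=cv2.INTER_LINEAR)
    labels = labels4.astype(np.float32)
    labels[:, [1, 3]] *= im_size / w  # x coordinates
    labels[:, [2, 4]] *= im_size / h  # y coordinates
    return img, labels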


import math

def random_perspective(im,
                       targets=(),
                       segments=(),
                       degrees=10,
                       translate=.1,
                       scale=.1,
                       shear=10,
                       perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]
    # resample_segments, segment2box and box_candidates used below are
    # YOLOv5 helpers (utils.general / utils.augmentations); import them
    # alongside this function

    height = im.shape[0] + border[0] * 2  # shape(h,w,c)
    width = im.shape[1] + border[1] * 2

    # Center: translate so rotation and scaling act about the image center
    C = np.eye(3)
    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)

    # Perspective: random perspective component
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # combined transform, applied right to left: re-center, perspective, rotate/scale, shear, then translate
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize (keep a copy of the input, e.g. im0 = im.copy(), before warping)
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(im0[:, :, ::-1])  # original
    # ax[1].imshow(im[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments)
        new = np.zeros((n, 4))
        if use_segments:  # warp segments
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine

                # clip
                new[i] = segment2box(xy, width, height)

        else:  # warp boxes
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine

            # create new boxes
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]

    return im, targets
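
Putting the two pieces together: in YOLOv5 the 2*img_size mosaic canvas is passed through random_perspective with border=mosaic_border, which applies the random affine and at the same time crops the output back to img_size x img_size. A usage sketch under that assumption, reusing the functions above:

img4, labels4, seg4 = load_mosaic(im_files, names, name_color_dict)
img4, labels4 = random_perspective(img4,
                                   targets=labels4,
                                   degrees=10,
                                   translate=0.1,
                                   scale=0.1,
                                   shear=10,
                                   perspective=0.0,
                                   border=[-320, -320])  # 1280x1280 -> 640x640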

Notes:

In warpPerspective() (and warpAffine()), the last two parameters, borderMode and borderValue, control how the background outside the transformed image is filled.

Where:

**borderMode:** selects the background extrapolation mode; the options include:
cv2.BORDER_CONSTANT
cv2.BORDER_REPLICATE

**borderValue:** the constant background fill color, given as a 3-tuple of 0-255 channel values (note that images loaded with cv2.imread are in BGR channel order).
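
A quick illustration of the two modes with warpAffine (warpPerspective takes the same parameters); img stands for any image loaded with cv2.imread:

M = np.float32([[1, 0, 40], [0, 1, 20]])  # shift right 40 px, down 20 px
h, w = img.shape[:2]
# constant gray fill, the (114, 114, 114) used in random_perspective above
shifted_const = cv2.warpAffine(img, M, (w, h),
                               borderMode=cv2.BORDER_CONSTANT,
                               borderValue=(114, 114, 114))
# replicate edge pixels outward instead of filling with a constant
shifted_repl = cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REPLICATE)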

https://blog.csdn.net/weixin_46142822/article/details/123805663
