dw, dh = np.mod(dw, 32), np.mod(dh, 32)
# 图片变形(letterbox:保持长宽比缩放并填充)
def letterbox(img, new_shape=(416, 416), color=(0, 0, 0),
              auto=True, scaleFill=False, scaleup=True, interp=cv2.INTER_AREA):
    """Resize ``img`` toward ``new_shape`` keeping aspect ratio, padding the rest.

    Resize image to a 32-pixel-multiple rectangle,
    see https://github.com/ultralytics/yolov3/issues/232

    Args:
        img: HxWxC numpy image (as read by cv2).
        new_shape: target (height, width), or a single int for a square target.
        color: padding colour (per-channel).
        auto: pad only up to the nearest 32-multiple (minimum rectangle).
        scaleFill: stretch to exactly ``new_shape`` with no padding.
        scaleup: if False, only scale down, never up (better test mAP).
        interp: cv2 interpolation flag (INTER_AREA is better, INTER_LINEAR is faster).

    Returns:
        (padded image, (width_ratio, height_ratio), (dw, dh)) where dw/dh is
        the padding added on each side.
    """
    shape = img.shape[:2]  # current shape: (height, width)
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old).
    # NOTE(review): upstream letterbox uses min(new_h/h, new_w/w); max/max is
    # equivalent only for a square target -- confirm if a rectangular
    # new_shape is ever passed here.
    r = max(new_shape) / max(shape)
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios (uniform scaling)
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # (w, h)
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle: pad only to the next 32-multiple
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # this is what makes w and h differ
    elif scaleFill:  # stretch to the exact target, no padding
        dw, dh = 0.0, 0.0
        # BUG FIX: cv2.resize expects (width, height) but new_shape is
        # (height, width); also the ratio indices were crossed
        # (h_new/w_old, w_new/h_old) instead of per-axis ratios.
        new_unpad = new_shape[1], new_shape[0]
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize only when the size actually changes
        img = cv2.resize(img, new_unpad, interpolation=interp)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
由于输入是矩形的,输出也应当是矩形(这里假设 TensorRT 转换没有问题;若形状不匹配,请重新转换一遍模型),因此需要在原来的基础上添加判断形状的代码。
我在 init 中把 grid_size 分成 x 和 y 两个变量,用以代表矩形的两条边。
# __init__ fragment: the single scalar grid size is split into separate
# x / y sizes so rectangular (non-square) inference can be supported.
# self.grid_size = 0  # old: one grid size; now split into grid_size_x / grid_size_y
self.grid_size_x = 0
self.grid_size_y = 0
# Box layout is (center_x, center_y, width, height).
# About x: for a non-square input such as 256x416,
# x.size(2), x.size(3) = 256/32 = 8, 416/32 = 13 at this layer (and so on
# for the following layers).
grid_size_y = x.size(2)
grid_size_x = x.size(3)
# prediction shape:
# (batch_size, num_anchors=3, grid_size_y, grid_size_x, num_classes + 5) (coco: 85)
prediction = (
x.view(num_samples, self.num_anchors, self.num_classes + 5, grid_size_y, grid_size_x)
.permute(0, 1, 3, 4, 2)  # permute: reorder the dimensions
.contiguous()
)
# compute_grid_offsets: computes the per-cell grid offsets used to undo the
# box shift introduced by resizing (possibly rectangular) input images.
def compute_grid_offsets(self, grid_size_y, grid_size_x, img_dim, cuda=True, Half=False):
    """Precompute grid offsets and rescaled anchors for a (gy, gx) grid.

    Since the grid may be rectangular, every x/y-dependent quantity is kept
    as a separate pair instead of a single grid_size.

    Args:
        grid_size_y: number of grid cells along the y (height) axis.
        grid_size_x: number of grid cells along the x (width) axis.
        img_dim: network input dimension (the longer side, in pixels).
        cuda: place tensors on the GPU.
        Half: use half precision (fp16) tensors.
    """
    self.grid_size_x = grid_size_x
    self.grid_size_y = grid_size_y
    gx = self.grid_size_x
    gy = self.grid_size_y
    # BUG FIX: the first FloatTensor assignment was dead code (immediately
    # overwritten), and the replacement picked torch.cuda.FloatTensor even
    # when cuda=False, crashing on CPU-only machines. Honour both flags.
    if cuda:
        FloatTensor = torch.cuda.HalfTensor if Half else torch.cuda.FloatTensor
    else:
        FloatTensor = torch.HalfTensor if Half else torch.FloatTensor
    self.img_dim = img_dim
    # The stride must be square: divide by the larger grid dimension.
    self.stride = self.img_dim / max(gx, gy)
    # Calculate offsets for each grid cell. grid_x varies along the last
    # (x) axis; grid_y varies along the y axis, hence the transpose.
    self.grid_x = torch.arange(gx).repeat(gy, 1).view([1, 1, gy, gx]).type(FloatTensor)
    self.grid_y = torch.arange(gy).repeat(gx, 1).t().contiguous().view([1, 1, gy, gx]).type(FloatTensor)
    # Anchors rescaled from input pixels to grid units.
    self.scaled_anchors = FloatTensor([(a_w / self.stride, a_h / self.stride) for a_w, a_h in self.anchors])
    self.anchor_w = self.scaled_anchors[:, 0].view((1, self.num_anchors, 1, 1))
    self.anchor_h = self.scaled_anchors[:, 1].view((1, self.num_anchors, 1, 1))