This post records my first attempt at modifying the YOLOv5 network structure. It is only an experiment; engineering efficiency is not a consideration here.
Step 1: Add the module code
Put the building blocks the modified model needs into models/common.py in the YOLOv5 source tree. Here ShuffleNetV2's InvertedResidual block is used as the example.
Add the required imports at the top of common.py:
from torch import Tensor
from typing import Callable, Any, List
Then add the InvertedResidual class, together with the channel_shuffle function it depends on, at the bottom of common.py:
def channel_shuffle(x: Tensor, groups: int) -> Tensor:
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups
    # reshape
    x = x.view(batchsize, groups, channels_per_group, height, width)
    x = torch.transpose(x, 1, 2).contiguous()
    # flatten
    x = x.view(batchsize, -1, height, width)
    return x
class InvertedResidual(nn.Module):
    def __init__(self, inp: int, oup: int, stride: int) -> None:
        super(InvertedResidual, self).__init__()
        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride

        branch_features = oup // 2
        # for stride 1 the input is split in half, so inp must equal 2 * branch_features
        assert (self.stride != 1) or (inp == branch_features << 1)

        if self.stride > 1:
            # downsampling branch: depthwise conv (stride > 1) followed by a pointwise conv
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Sequential()

        # main branch: pointwise conv -> depthwise conv -> pointwise conv
        self.branch2 = nn.Sequential(
            nn.Conv2d(inp if (self.stride > 1) else branch_features,
                      branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )

    @staticmethod
    def depthwise_conv(i: int, o: int, kernel_size: int, stride: int = 1,
                       padding: int = 0, bias: bool = False) -> nn.Conv2d:
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            # split channels: one half passes through unchanged, the other goes through branch2
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        out = channel_shuffle(out, 2)
        return out
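Before wiring the block into YOLOv5, it can be handy to sanity-check it in isolation. The following is a minimal sketch (the channel counts and feature-map size are arbitrary examples, and it assumes the two definitions above are already in scope):

import torch

# A stride-2 block halves the spatial resolution and may change the channel count;
# a stride-1 block keeps both and requires inp == oup (see the assert above).
down = InvertedResidual(64, 128, 2)
keep = InvertedResidual(128, 128, 1)

x = torch.randn(1, 64, 80, 80)
y = keep(down(x))
print(y.shape)  # expected: torch.Size([1, 128, 40, 40])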
Step 2: Register the module in parse_model
In the parse_model function of models/yolo.py, the class name used in the model yaml (here InvertedResidual) must appear in the list of modules whose input/output channels parse_model computes. In my copy the check looks like this (the other custom names come from modules I had already added to common.py; on a stock YOLOv5 you only need to append InvertedResidual to the default list):

if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
         C3, C3TR, SELayer, CBAM, CoorAttention, ShuffleNetV2_InvertedResidual, InvertedResidual,
         conv_bn_relu_maxpool, StemBlock, conv_bn_hswish, MobileNetV3_InvertedResidual]:
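For reference, the code guarded by this check is what turns the yaml args into constructor arguments. Paraphrasing the 5.x source (a sketch, not a verbatim copy), it does roughly the following, which is why the yaml entry [128, 2] ends up as InvertedResidual(inp, oup, stride):

# inside parse_model, for every module in the list above
c1, c2 = ch[f], args[0]               # input channels of the previous layer, output channels from the yaml
if c2 != no:                          # 'no' is the output size of the Detect layer
    c2 = make_divisible(c2 * gw, 8)   # gw = width_multiple, so 128 becomes 64 when width_multiple is 0.5
args = [c1, c2, *args[1:]]            # e.g. [128, 2] -> (c1, 64, 2) = (inp, oup, stride)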
Step 3: Create the model configuration
Create a new file yolov5-shufflenetv2-focus.yaml in the models directory with the following contents:
# parameters
nc: 80  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 0.5  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# Custom backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, Focus, [64, 3]],              # 0-P2/4
   [-1, 1, InvertedResidual, [128, 2]],  # 1-P3/8
   [-1, 3, InvertedResidual, [128, 1]],  # 2
   [-1, 1, InvertedResidual, [256, 2]],  # 3-P4/16
   [-1, 7, InvertedResidual, [256, 1]],  # 4
   [-1, 1, InvertedResidual, [512, 2]],  # 5-P5/32
   [-1, 3, InvertedResidual, [512, 1]],  # 6
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],  # cat backbone P4
   [-1, 1, C3, [128, False]],  # 10

   [-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 2], 1, Concat, [1]],  # cat backbone P3
   [-1, 1, C3, [128, False]],  # 14 (P3/8-small)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 11], 1, Concat, [1]],  # cat head P4
   [-1, 1, C3, [128, False]],  # 17 (P4/16-medium)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 7], 1, Concat, [1]],  # cat head P5
   [-1, 1, C3, [128, False]],  # 20 (P5/32-large)

   [[14, 17, 20], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
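Before launching a full training run it is worth checking that the yaml parses and the model builds. A minimal sketch, assuming it is run from the YOLOv5 repository root (the 640x640 dummy input is just an example):

import torch
from models.yolo import Model

# Build the model from the new yaml and push a dummy batch through it;
# parse_model prints the layer table while the model is being constructed.
model = Model('models/yolov5-shufflenetv2-focus.yaml', ch=3, nc=80)
model.eval()
with torch.no_grad():
    preds = model(torch.zeros(1, 3, 640, 640))

Running python models/yolo.py --cfg models/yolov5-shufflenetv2-focus.yaml performs essentially the same check from the command line.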
Step 4: Point train.py at the new configuration
In train.py, change the default of the --cfg argument:
parser.add_argument('--cfg', type=str, default='models/yolov5-shufflenetv2-focus.yaml', help='model.yaml path')
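Changing the default works, but the same thing can be done without touching the code by passing --cfg on the command line. For example (the dataset, image size, batch size and epoch count below are placeholders; the flag names follow the 5.x train.py):

python train.py --cfg models/yolov5-shufflenetv2-focus.yaml --data data/coco128.yaml --weights '' --img-size 640 --batch-size 16 --epochs 100

Passing --weights '' trains from scratch, which avoids loading a pretrained checkpoint whose layers no longer match the modified backbone.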