FSRCNN是对之前SRCNN的改进,主要体现在更快的加速和更出色的修复质量上。
作者将FSRCNN分为五个部分:
激活函数:PReLU。主要是为了避免ReLU中零梯度导致的“死特征”。
损失函数:MSE。
The transitions from SRCNN to FSRCNN:
可以从图中看出来,这种加速并不是以性能下降为代价的。
from torch import nn
class FSRCNN(nn.Module):
    """Fast Super-Resolution CNN (FSRCNN, Dong et al. 2016).

    Pipeline: feature extraction (5x5) -> channel shrinking (1x1) ->
    four 3x3 mapping layers -> channel expanding (1x1) -> a single
    transposed convolution that performs the upscaling.
    """

    def __init__(self, scale, inputChannel=1, outputChannel=1):
        """
        Args:
            scale: integer upscaling factor applied by the final
                transposed convolution.
            inputChannel: channels of the low-resolution input
                (default 1, presumably the Y channel — confirm with caller).
            outputChannel: channels of the super-resolved output.
        """
        super().__init__()
        # Hyperparameters from the paper: d = LR feature dim,
        # s = shrunk dim, m = number of 3x3 mapping layers.
        d, s, m = 56, 12, 4

        # Feature extraction: 5x5 conv, padding keeps spatial size.
        self.firstPart = nn.Sequential(
            nn.Conv2d(inputChannel, d, kernel_size=5, padding=2),
            nn.PReLU(d),
        )

        # Shrink -> m mapping layers -> expand (module order matches
        # the reference layout, so state_dict keys are unchanged).
        layers = [nn.Conv2d(d, s, kernel_size=1), nn.PReLU(s)]
        for _ in range(m):
            layers += [nn.Conv2d(s, s, kernel_size=3, padding=1), nn.PReLU(s)]
        layers += [nn.Conv2d(s, d, kernel_size=1), nn.PReLU(d)]
        self.midPart = nn.Sequential(*layers)

        # Deconvolution: with padding=4 and output_padding=scale-1 the
        # output spatial size is exactly input_size * scale.
        self.lastPart = nn.Sequential(
            nn.ConvTranspose2d(
                d,
                outputChannel,
                kernel_size=9,
                stride=scale,
                padding=9 // 2,
                output_padding=scale - 1,
            ),
        )

    def forward(self, x):
        """Map a low-resolution tensor (N, C, H, W) to (N, C, H*scale, W*scale)."""
        return self.lastPart(self.midPart(self.firstPart(x)))
https://zhuanlan.zhihu.com/p/31664818
https://arxiv.org/abs/1608.00367