Implementing Deconvolution / Transposed Convolution with NumPy and PyTorch

Preface

Transposed convolution is also referred to as deconvolution or inverse convolution, but "transposed convolution" is the most accurate and widely used name; in mainstream deep learning frameworks the corresponding functions are named conv_transpose (for example, nn.ConvTranspose2d in PyTorch).
The computation of a transposed convolution is easy to misread. For example, a 4x4 input convolved with a 3x3 kernel (stride=1, padding=0) gives a 2x2 output, and a transposed convolution takes that output as its input: a 2x2 input passed through a 3x3 transposed convolution gives a 4x4 output. This looks like the inverse of the convolution, but it is not an inverse operation: only the shape is restored, not the original values.
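
A minimal PyTorch sketch of this shape round trip (not from the original post; the variable names are illustrative, and bias is disabled so only the kernels matter):

import torch
from torch import nn

x = torch.randn(1, 1, 4, 4)                                # 4x4 input
conv = nn.Conv2d(1, 1, kernel_size=3, bias=False)          # 3x3 kernel, stride=1, padding=0
deconv = nn.ConvTranspose2d(1, 1, kernel_size=3, bias=False)

y = conv(x)                       # shape (1, 1, 2, 2)
x_hat = deconv(y)                 # shape (1, 1, 4, 4): same shape as x ...
print(y.shape, x_hat.shape)
print(torch.allclose(x, x_hat))   # ... but generally not the same values: False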

Transposed Convolution

The difference between a transposed convolution and an inverse convolution is clearer in matrix form.
A convolution can be written as

y = Cx

If we followed the notion of a matrix inverse from linear algebra, an "inverse convolution" would be

x = C^{-1}y

But what a transposed convolution actually computes is

x̂ = C^{T}y

which has the shape of x but is in general not equal to x. The NumPy implementation of transposed convolution given at https://zhuanlan.zhihu.com/p/79680474 makes this more concrete.

from torch import nn
import numpy as np
import torch
batch_size = 1
stride = 1
padding = "VALID"       # i.e. no padding (this variable is not used below)
input_channel = 1
input_size = 5
output_channel = 1
filter_size = 3
output_size = 3         # (input_size - filter_size) // stride + 1

input_np = np.reshape(np.arange(input_size*input_size, dtype="float32"),newshape=[input_size,input_size])
print(f"input_np = {input_np}")
print("input_up.shape = %s" % str(input_np.shape))

input_np_flattern = np.reshape(input_np, newshape=[input_size*input_size, 1])
print(f"input_np_flattern = {input_np_flattern}")
print("input_np_flattern = %s" % str(input_np_flattern.shape))

filter_np = np.reshape(np.arange(filter_size*filter_size, dtype="float32"),newshape=[filter_size,filter_size])
print(f"filter_np = {filter_np}")
print("filter_np.shape = %s" % str(filter_np.shape))

# Build the sparse matrix C so that y = C x reproduces the sliding-window convolution
filter_np_matrix = np.zeros((output_size,output_size,input_size,input_size))
print(filter_np_matrix.shape)
# Convolution: place the 3x3 kernel at each sliding-window position
for h in range(output_size):
    for w in range(output_size):
        start_h = h*stride
        start_w = w*stride
        end_h = start_h + filter_size
        end_w = start_w + filter_size
        filter_np_matrix[h, w, start_h:end_h, start_w:end_w] = filter_np
filter_np_matrix = np.reshape(filter_np_matrix,newshape=[output_size*output_size, input_size*input_size])
print(f"filter_np_matrix = {filter_np_matrix}")
print("filter_np_matrix.shape = %s" % str(filter_np_matrix.shape))
# Multiply: y = C x, then reshape back to 3x3
output_np = np.dot(filter_np_matrix, input_np_flattern)
output_np = np.reshape(output_np, newshape=[output_size, output_size])
print(f"output_np = {output_np}")
print("output_np.shape = %s" % str(output_np.shape))

# Transposed convolution (deconvolution): x_hat = C^T y
output_np_flattern = np.reshape(output_np, newshape=[output_size*output_size, 1])
output_np_transpose = np.dot(filter_np_matrix.T, output_np_flattern)
output_np_transpose = np.reshape(output_np_transpose, newshape=[input_size,input_size])
print(f"output_np_transpose = {output_np_transpose}")
print("output_np_transpose.shape = %s" % str(output_np_transpose.shape))

Implementing Transposed Convolution with PyTorch

Following the book Dive into Deep Learning (Mu Li et al.), here is a PyTorch rewrite of the same idea.

# Initialize the 4x4 input and the 3x3 convolution kernel
conv_input = torch.arange(1,17,dtype=torch.float32).reshape((1,1,4,4))
conv_weight = torch.arange(1,10,dtype=torch.float32).reshape((1,1,3,3))
conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, padding=0, bias=False)
print(conv.weight)

conv.weight = torch.nn.Parameter(conv_weight)
print(conv.weight)
print(conv(conv_input))     # 2x2 output
# Implement the same convolution as a matrix multiplication
W, k = torch.zeros((4,16)) , torch.zeros(11)
k[:3], k[4:7], k[8:] = conv_weight[0, 0, 0, :], conv_weight[0, 0, 1, :], conv_weight[0, 0, 2, :]
W[0, 0:11], W[1, 1:12], W[2, 4:15], W[3, 5:16] = k, k, k, k
print(torch.mm(W, conv_input.reshape(16,-1)).reshape((1,1,2,2)))   # same 2x2 values
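
# A follow-up sketch (not in the original post): multiplying by W^T maps the
# 2x2 result back to a 4x4 tensor and agrees with F.conv_transpose2d applied
# with the same 3x3 kernel. The names Y_flat / X_back are ours.
import torch.nn.functional as F
Y_flat = torch.mm(W, conv_input.reshape(16, -1))           # flattened 2x2 output
X_back = torch.mm(W.t(), Y_flat).reshape((1, 1, 4, 4))     # multiply by W^T
print(X_back)
print(F.conv_transpose2d(Y_flat.reshape(1, 1, 2, 2), conv_weight))   # same values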

# Conv2d: stride 2 halves the spatial size
conv = nn.Conv2d(in_channels=3, out_channels=10, kernel_size=4, padding=1, stride=2)
conv_weight = torch.arange(0,480,dtype=torch.float32).reshape((10,3,4,4))
conv.weight = torch.nn.Parameter(conv_weight)   # initialize conv's weight

print(f"conv_weight_shape={conv_weight.shape}")
print(conv)
conv_input = torch.randn((1,3,64,64),dtype=torch.float32)
conv_output = conv(conv_input)
print(f"conv_output={conv_output.shape}")  # after Conv2d the spatial size is halved: 64x64 -> 32x32
# ConvTranspose2d: the same hyperparameters scale the spatial size back up
conv_trans = nn.ConvTranspose2d(in_channels=10,out_channels=3,kernel_size=4,padding=1,stride=2)
# ConvTranspose2d weights have shape (in_channels, out_channels, kH, kW)
conv_trans_weight = torch.arange(0,480,dtype=torch.float32).reshape((10,3,4,4))
conv_trans.weight = torch.nn.Parameter(conv_trans_weight)   # initialize conv_trans's weight
print(f"conv_trans_weight_shape={conv_trans_weight.shape}")
print(f"conv_trans_output={conv_trans(conv_output).shape}")   # back to (1, 3, 64, 64)
