Implementing convolution in PyTorch

Official documentation

  1. Conv2d documentation: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
  2. torch.nn.functional.conv2d documentation: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv2d.html

Method 1: using torch.nn

import torch
import torch.nn as nn
import torch.nn.functional as F


in_channels = 1    # number of input channels
out_channels = 1   # number of output channels
kernel_size = (2,3)
batch_size = 1
# kernel_size: a scalar gives a square kernel; a tuple gives a kernel whose height and width differ
# stride defaults to 1
# padding defaults to 0
bias = False

# input_size has 4 dimensions: batch, channels, height, width
input_size = [batch_size, in_channels, 4, 4]

conv_layer = torch.nn.Conv2d(in_channels, out_channels, kernel_size, bias = bias)
input_feature_map = torch.randn(input_size)  # torch.randn samples from a standard normal distribution  # input feature map
output_feature_map = conv_layer(input_feature_map)

print(input_feature_map)
print(conv_layer)
print(conv_layer.weight)  # prints the kernel; its shape is 1*1*2*3 = out_channels*in_channels*kernel_height*kernel_width
print(output_feature_map)
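
To see how the kernel size, stride, and padding affect the output shape, here is a minimal check using the variables defined above (conv_layer2 and its padding value are illustrative additions, not part of the original code):

# With a 4x4 input, a (2,3) kernel, stride 1 and no padding:
# output height = (4 - 2)/1 + 1 = 3, output width = (4 - 3)/1 + 1 = 2
print(output_feature_map.shape)  # torch.Size([1, 1, 3, 2])

# The same layer with padding = 1 (illustrative):
# output height = (4 + 2 - 2)/1 + 1 = 5, output width = (4 + 2 - 3)/1 + 1 = 4
conv_layer2 = torch.nn.Conv2d(in_channels, out_channels, kernel_size, padding=1, bias=bias)
print(conv_layer2(input_feature_map).shape)  # torch.Size([1, 1, 5, 4])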

Method 2: using torch.nn.functional

Difference from the torch.nn API: the weight (and bias) must be passed in explicitly.

output_feature_map1 = F.conv2d(input_feature_map, conv_layer.weight)
print(output_feature_map1)

input = input_feature_map  # input feature map for the convolution
kernel = conv_layer.weight.data  # convolution kernel
print(kernel)
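
Because the weight of conv_layer is reused here, the functional call should reproduce the output of Method 1 (a minimal check, assuming bias stays disabled as above):

output_feature_map2 = F.conv2d(input, kernel)  # weight passed in explicitly, no bias
print(torch.allclose(output_feature_map, output_feature_map2))  # True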

Method 3: implementing 2D convolution with raw matrix operations, ignoring the batch and channel dimensions for now

import math

input = torch.randn(5,5)
kernel = torch.randn(3,3)

def matrix_multiplication_for_conv2d(input,kernel,stride=1,padding = 0):
    
    if padding > 0:
        input = F.pad(input,(padding,padding,padding,padding))
    
    input_h,input_w = input.shape
    kernel_h,kernel_w = kernel.shape
    
    output_h = (math.floor((input_h - kernel_h)/stride) + 1)  # output height of the convolution
    output_w = (math.floor((input_w - kernel_w)/stride) + 1)  # output width of the convolution
    output = torch.zeros(output_h, output_w)  # initialize the output matrix
    
    for i in range(0, input_h-kernel_h+1, stride):  # slide over the height dimension
        for j in range(0, input_w-kernel_w+1, stride):  # slide over the width dimension
            region = input[i:i+kernel_h, j:j+kernel_w]  # region currently covered by the kernel
            output[int(i/stride), int(j/stride)] = torch.sum(region * kernel)  # element-wise product and sum, written to the output element
            
    return output

mat_mul_conv_output = matrix_multiplication_for_conv2d(input,kernel,padding=1)
print(mat_mul_conv_output)
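
To sanity-check the hand-rolled implementation, its result can be compared against F.conv2d after restoring the batch and channel dimensions (a minimal sketch using the input and kernel tensors defined above):

pytorch_conv_output = F.conv2d(input.reshape(1, 1, 5, 5),
                               kernel.reshape(1, 1, 3, 3),
                               padding=1).squeeze()
print(torch.allclose(mat_mul_conv_output, pytorch_conv_output))  # expected: True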

padding

With padding = 1, one row/column of zeros is added on each of the four sides, e.g. a 5*5 input becomes 7*7.
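
For example, F.pad (as used inside matrix_multiplication_for_conv2d above) shows the size change directly; the tensor x here is just an illustration:

x = torch.randn(5, 5)
print(F.pad(x, (1, 1, 1, 1)).shape)  # left/right/top/bottom each padded by 1 -> torch.Size([7, 7])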

Each output element is the dot product of the kernel with the region of the input feature map it covers (element-wise multiply, then sum).

Computing the output_feature_map size:
without padding: floor((i - k) / stride) + 1
with padding:    floor((i + 2p - k) / stride) + 1
The stride therefore affects the size of output_feature_map.
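
As a worked example with the numbers from Method 3 (i = 5, k = 3, p = 1, stride = 1): floor((5 + 2*1 - 3)/1) + 1 = 5, which matches the 5*5 shape of mat_mul_conv_output above.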
