conv2d = torch.nn.Conv2d(
    in_channels,   # (int) number of channels in the input image
    out_channels,  # (int) number of channels produced by the convolution
    kernel_size,   # (int or tuple) size of the convolution kernel
    stride=1,      # (int or tuple) stride of the convolution
    padding=0,     # (int or tuple) amount of zero-padding added to both sides of the input
    dilation=1,    # (int or tuple) spacing between kernel elements; controls the dilation of a dilated (atrous) convolution, default is an ordinary convolution
    groups=1,      # (int) number of blocked connections from input channels to output channels, i.e. the number of groups of a grouped convolution; default is no grouping
    bias=True      # whether to add a learnable bias
)
conv2d.bias             # the bias parameter itself
conv2d.bias.data        # the underlying data tensor of the bias
conv2d.weight           # the convolution kernels (weight parameter)
conv2d.weight.data      # the underlying data tensor of the kernels
conv2d.weight.data[0]   # values of the 0th convolution kernel
# Other attributes
conv2d.in_channels
conv2d.out_channels
conv2d.kernel_size
conv2d.stride
conv2d.padding
conv2d.groups
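A minimal usage sketch (the channel counts and tensor shapes below are illustrative assumptions): a Conv2d layer expects input of shape (batch, in_channels, H, W).
conv2d = torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1)
x = torch.randn(8, 3, 32, 32)   # (batch, in_channels, H, W)
y = conv2d(x)                   # -> (8, 16, 32, 32): padding=1 with a 3x3 kernel keeps the spatial size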
torch.nn.Conv1d()           # 1D convolution
torch.nn.Conv2d()           # 2D convolution
torch.nn.Conv3d()           # 3D convolution
torch.nn.ConvTranspose1d()  # 1D transposed convolution
torch.nn.ConvTranspose2d()  # 2D transposed convolution
torch.nn.ConvTranspose3d()  # 3D transposed convolution
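A short sketch of a transposed convolution used for upsampling (shapes and parameters are illustrative assumptions):
upconv = torch.nn.ConvTranspose2d(in_channels=16, out_channels=8, kernel_size=2, stride=2)
x = torch.randn(8, 16, 16, 16)
y = upconv(x)                   # -> (8, 8, 32, 32): kernel_size=2 with stride=2 doubles H and W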
maxpool2d = torch.nn.MaxPool2d(
    kernel_size,           # (int or tuple) size of the pooling window
    stride=None,           # (int or tuple) stride of the pooling window; defaults to kernel_size
    padding=0,             # (int or tuple) amount of zero-padding added to each side of the input
    dilation=1,            # (int or tuple) spacing between window elements
    return_indices=False,  # if True, also return the indices of the max values, useful for MaxUnpool2d
    ceil_mode=False        # whether to round the output size up; default is to round down
)
maxpool2d.padding
maxpool2d.kernel_size
maxpool2d.stride
maxpool2d.ceil_mode
maxpool2d.dilation
maxpool2d.return_indices
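A minimal sketch of how the pooling window reduces spatial size (shapes are illustrative assumptions):
maxpool2d = torch.nn.MaxPool2d(kernel_size=2, stride=2)
x = torch.randn(8, 16, 32, 32)
y = maxpool2d(x)                # -> (8, 16, 16, 16): a 2x2 window with stride 2 halves H and W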
# Max pooling
torch.nn.MaxPool1d()
torch.nn.MaxPool2d()
torch.nn.MaxPool3d()
# Max unpooling (inverse of max pooling)
torch.nn.MaxUnpool1d()
torch.nn.MaxUnpool2d()
torch.nn.MaxUnpool3d()
# Average pooling
torch.nn.AvgPool1d()
torch.nn.AvgPool2d()
torch.nn.AvgPool3d()
# Adaptive max pooling
torch.nn.AdaptiveMaxPool1d()
torch.nn.AdaptiveMaxPool2d()
torch.nn.AdaptiveMaxPool3d()
# Adaptive average pooling
torch.nn.AdaptiveAvgPool1d()
torch.nn.AdaptiveAvgPool2d()
torch.nn.AdaptiveAvgPool3d()
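A short sketch (shapes assumed for illustration): adaptive pooling takes the desired output size instead of a window size, and MaxUnpool2d uses the indices returned by MaxPool2d to invert the pooling.
adaptive_avg = torch.nn.AdaptiveAvgPool2d(output_size=(7, 7))
x = torch.randn(8, 16, 30, 45)
y = adaptive_avg(x)                 # -> (8, 16, 7, 7) regardless of the input spatial size

pool = torch.nn.MaxPool2d(2, stride=2, return_indices=True)
unpool = torch.nn.MaxUnpool2d(2, stride=2)
z = torch.randn(8, 16, 32, 32)
p, indices = pool(z)                # -> (8, 16, 16, 16) plus the indices of the max values
restored = unpool(p, indices)       # -> (8, 16, 32, 32), zeros everywhere except the max positions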
import torch
from torch import nn

torch.nn.Sigmoid()
torch.nn.Tanh()
torch.nn.ReLU()
torch.nn.Softplus()  # smooth approximation of the ReLU activation
x = torch.linspace(-6, 6, 100)
sigmoid = nn.Sigmoid()
relu = nn.ReLU()
tanh = nn.Tanh()
softplus = nn.Softplus()
y_sigmoid = sigmoid(x)
y_relu = relu(x)
y_tanh = tanh(x)
y_softplus = softplus(x)
rnn = torch.nn.RNN(
    input_size,           # number of features in the input x
    hidden_size,          # number of features in the hidden state
    num_layers=1,         # number of recurrent layers in the RNN
    nonlinearity='tanh',  # must be either 'tanh' or 'relu'
    bias=True,            # whether the RNN layers use bias weights
    batch_first=False,    # if True, input and output are shaped (batch_size, time_step, feature)
    dropout=0,            # dropout probability applied to the output of every RNN layer except the last; 0 means no dropout
    bidirectional=False   # whether the RNN is bidirectional
)
A simple example:
rnn = nn.RNN(10, 20, 2)          # input_size=10, hidden_size=20, num_layers=2
input = torch.randn(5, 3, 10)    # (time_step, batch_size, input_size)
h0 = torch.randn(2, 3, 20)       # initial hidden state: (num_layers, batch_size, hidden_size)
output, hn = rnn(input, h0)      # output: (5, 3, 20), hn: (2, 3, 20)
import torch
torch.nn.LSTM()      # multi-layer long short-term memory (LSTM) network
torch.nn.GRU()       # multi-layer gated recurrent unit (GRU) network
torch.nn.RNNCell()   # a single RNN recurrent cell
torch.nn.LSTMCell()  # a single long short-term memory (LSTM) cell
torch.nn.GRUCell()   # a single gated recurrent unit (GRU) cell
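A minimal LSTM sketch, analogous to the RNN example above (sizes are illustrative assumptions); an LSTM additionally carries a cell state c alongside the hidden state h:
lstm = torch.nn.LSTM(10, 20, 2)            # input_size=10, hidden_size=20, num_layers=2
input = torch.randn(5, 3, 10)              # (time_step, batch_size, input_size)
h0 = torch.randn(2, 3, 20)                 # initial hidden state
c0 = torch.randn(2, 3, 20)                 # initial cell state
output, (hn, cn) = lstm(input, (h0, c0))   # output: (5, 3, 20)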
fc = torch.nn.Linear(
    in_features,   # number of input features per sample
    out_features,  # number of output features per sample
    bias=True      # whether the layer learns a bias
)
fc.bias          # out_features bias values, or None
fc.weight        # weights of shape (out_features, in_features)
fc.in_features   # number of input features per sample
fc.out_features  # number of output features per sample
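A minimal usage sketch (the feature sizes are illustrative assumptions):
fc = torch.nn.Linear(in_features=128, out_features=10)
x = torch.randn(32, 128)   # (batch_size, in_features)
y = fc(x)                  # -> (32, 10)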