nn.Module is the base class for all neural network modules; any model we build ourselves should also subclass it.
import torch
from torch import nn

class LR(nn.Module):
    def __init__(self) -> None:  # override-method shortcut in PyCharm: Alt+Insert
        super().__init__()

    def forward(self, input):
        output = input + 1
        return output

lrp = LR()
x = torch.tensor(1.0)  # note: torch.Tensor(1.0) raises a TypeError; use torch.tensor() for a scalar value
output = lrp(x)        # calling the module invokes forward() via __call__
print(output)          # tensor(2.)
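As a slightly richer sketch (TinyNet is a hypothetical example, not part of the original notes), a module usually holds learnable sub-layers; calling super().__init__() first is what lets nn.Module register those sub-modules and their parameters:

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()           # must run before assigning sub-modules
        self.fc = nn.Linear(4, 2)    # learnable sub-module: 4*2 weights + 2 biases

    def forward(self, x):
        return self.fc(x)

net = TinyNet()
print(sum(p.numel() for p in net.parameters()))  # 10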
Convolutional layers
import torch
import torch.nn.functional as F

input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])
kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])

input = torch.reshape(input, (1, 1, 5, 5))    # target shape: (batch_size, channels, height, width)
kernel = torch.reshape(kernel, (1, 1, 3, 3))
print(input.shape)    # torch.Size([1, 1, 5, 5])
print(kernel.shape)   # torch.Size([1, 1, 3, 3])

output = F.conv2d(input, kernel, stride=1)
print(output)         # tensor([[[[10, 12, 12], [18, 16, 16], [13,  9,  3]]]])
F.conv2d applies a 2D convolution over an input composed of several input planes.
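As a quick follow-up sketch reusing the input and kernel above (the names output2 and output3 are just for illustration), stride sets how far the kernel moves per step and padding zero-pads the border before convolving:

output2 = F.conv2d(input, kernel, stride=2)              # kernel moves 2 pixels per step -> 2x2 output
print(output2.shape)                                     # torch.Size([1, 1, 2, 2])

output3 = F.conv2d(input, kernel, stride=1, padding=1)   # zero-pad the 5x5 input to 7x7 -> 5x5 output
print(output3.shape)                                     # torch.Size([1, 1, 5, 5])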
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d                 # Conv2d comes from torch.nn, not mmcv.cnn
from torch.utils.data import DataLoader     # DataLoader comes from torch.utils.data, not mmcv
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10(r"C:\Users\123\Desktop\python4.7\test03\data", train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=True)    # train=False selects the test split rather than the training split
dataloader = DataLoader(dataset, batch_size=64)
class LR(nn.Module):
    def __init__(self):  # initialisation method
        super(LR, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x

lrp = LR()
# print(lrp)

writer = SummaryWriter("../logs")
step = 0
for data in dataloader:
    imgs, targets = data
    output = lrp(imgs)
    # imgs.shape   -> torch.Size([64, 3, 32, 32])
    # output.shape -> torch.Size([64, 6, 30, 30])
    writer.add_images("input", imgs, step, dataformats='NCHW')
    # A colour image has 3 channels, so a 6-channel tensor cannot be shown directly;
    # fold the extra channels into the batch dimension instead. The -1 is inferred
    # from the remaining dimensions (here it resolves to 128).
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images("output", output, step, dataformats='NCHW')
    step = step + 1
writer.close()
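To see where the 32 → 30 spatial size comes from, here is a small sanity-check sketch (conv_out_size is a hypothetical helper, not part of the original script) of the usual Conv2d output-size formula with dilation 1:

def conv_out_size(h_in, kernel_size, stride=1, padding=0):
    # H_out = floor((H_in + 2*padding - kernel_size) / stride) + 1  (dilation assumed to be 1)
    return (h_in + 2 * padding - kernel_size) // stride + 1

print(conv_out_size(32, kernel_size=3, stride=1, padding=0))  # 30, matching the [64, 6, 30, 30] shape above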