Summary of Neural Network Visualization Methods

Contents

1--Visualizing a neural network with the netron library

2--Visualizing a neural network with tensorboard

3--Tensorboard visualization of an FCNN classifying the MNIST dataset

4--References

1--Visualizing a neural network with the netron library

Introduction: netron is a visualization library for deep learning models. Visualizing a PyTorch neural network with it takes two steps:

First, export the PyTorch model to ONNX format; the code is as follows:

torch.onnx.export(model, data, onnx_path)
# model: the neural network model
# data: an example input to the model
# onnx_path: file name to save the model to
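
If you want the graph in netron to show readable tensor names, torch.onnx.export also accepts optional input_names, output_names and dynamic_axes arguments. A minimal sketch (the names "input"/"output" and the dynamic batch axis are illustrative choices, not required):

torch.onnx.export(
    model, data, onnx_path,
    input_names=["input"],    # name shown for the graph input
    output_names=["output"],  # name shown for the graph output
    dynamic_axes={"input": {0: "batch"}, "output": {0: "batch"}}  # allow a variable batch size
)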

Then load the ONNX model file into netron to visualize it; the code is as follows:

netron.start(onnx_path)
# onnx_path: file name of the ONNX-format network
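
Before loading the file into netron, you can optionally sanity-check the export with the onnx package (a minimal sketch, assuming the onnx package is installed):

import onnx

onnx_model = onnx.load(onnx_path)     # load the exported .onnx file
onnx.checker.check_model(onnx_model)  # raises an exception if the graph is malformed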

The complete example code is as follows:

# Import third-party libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx 
import netron
 
    
# Build the neural network model
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1, bias=False)
        
        self.block1 = nn.Sequential(
            nn.Conv2d(64, 64, 3, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            
            nn.Conv2d(64, 32, 1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            
            nn.Conv2d(32, 64, 3, padding=1, bias=False),
            nn.BatchNorm2d(64)
        )
 
        self.output = nn.Sequential(
            nn.Conv2d(64, 1, 3, padding=1, bias=True),
            nn.Sigmoid()
        )
 
    def forward(self, x):
        x = self.conv1(x)
        residual = x
        x = F.relu(self.block1(x) + residual)
        x = self.output(x)
        
        return x
        

model = Model() # model instance
data = torch.rand(1, 3, 416, 416) # example input
onnx_path = "onnx_model_name.onnx" # output file name

torch.onnx.export(model, data, onnx_path) # export the model to ONNX format
 
netron.start(onnx_path) # launch netron

Visualization result:

[Figure 1: netron visualization of the model]

2--Visualizing a neural network with tensorboard

Code:

# Import third-party libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorboardX import SummaryWriter


# Build the neural network model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1, bias=False)
        
        self.block1 = nn.Sequential(
            nn.Conv2d(64, 64, 3, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            
            nn.Conv2d(64, 32, 1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            
            nn.Conv2d(32, 64, 3, padding=1, bias=False),
            nn.BatchNorm2d(64)
        )
        
        self.output = nn.Sequential(
            nn.Conv2d(64, 1, 3, padding=1, bias=True),
            nn.Sigmoid()
        )
        
    
    def forward(self, x):
        x = self.conv1(x)
        residual = x
        x = F.relu(self.block1(x) + residual)
        x = self.output(x)
        
        return x


Input_data = torch.rand(1, 3, 416, 416) # example input data
Model = Net() # model instance

with SummaryWriter(comment = 'Net') as w:
    w.add_graph(Model, (Input_data, ))

Running this code creates a runs folder containing an events.out.tfevents file. Then run the following command:

tensorboard --logdir path
# path: the folder containing the events.out.tfevents file
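
By default, SummaryWriter(comment='Net') writes to an automatically named subfolder of runs/ whose name includes the comment string; recent PyTorch versions also ship torch.utils.tensorboard.SummaryWriter with a compatible interface. If you prefer a fixed, predictable path for --logdir, you can pass the log directory explicitly instead of a comment (a minimal sketch; the directory name 'runs/net_graph' is arbitrary):

with SummaryWriter('runs/net_graph') as w:  # write events to a fixed directory
    w.add_graph(Model, (Input_data, ))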

Result: open the URL highlighted in the red box in the figure below to view and download the visualized network.

[Figure 2: terminal output showing the tensorboard URL]

[Figure 3: the network graph rendered in tensorboard]

3--Tensorboard visualization of an FCNN classifying the MNIST dataset

Code:

# Import third-party libraries
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter

# FCNN model
class Net(torch.nn.Module):
    def __init__(self):    # constructor
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(784, 512) # linear layer: input dimension, output dimension
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)


    def forward(self, x):
        x = x.view(-1, 784)  # view works like numpy's reshape; -1 infers the batch size, 784 is the flattened 28x28 image
        x = F.relu(self.l1(x)) # ReLU activation
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return self.l5(x)


# Training function
def train(epoch, model, train_loader, criterion, optimizer):
    running_loss = 0.0
    epoch_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0): # enumerate from index 0, yielding the batch index and the batch
        inputs, target = data # features, labels
        optimizer.zero_grad() # zero the gradients

        # forward + backward + update
        outputs = model(inputs)
        loss = criterion(outputs, target) # compute the loss
        loss.backward() # backpropagate the gradients
        optimizer.step() # update the parameters

        running_loss += loss.item() # item() converts the one-element loss tensor to a Python float
        epoch_loss += loss.item()
        if batch_idx % 300 == 299: # print the average loss every 300 batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
    
    return epoch_loss / (batch_idx + 1)

# Test function
def test(model, test_loader):
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim = 1) # predicted holds the column index of the max value in each row
            total += labels.size(0) # total number of samples
            correct += (predicted == labels).sum().item() # number of correct predictions
    print('Accuracy on test set: %d %%' % (100 * correct / total)) # accuracy

    return 100 * correct / total

# Main function
def main():
    
    # batch size
    batch_size = 64

    # data preprocessing
    transform = transforms.Compose([
        transforms.ToTensor(), # convert an (H, W, C) image to a (C, H, W) tensor and scale values to [0, 1]
        transforms.Normalize((0.1307, ), (0.3081, )) # per-channel standardization with the MNIST mean and std
    ])
    ])
    
    # FCNN model
    model = Net()
    
    # loss and optimizer
    criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss
    optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum = 0.5) # SGD optimizer: lr is the learning rate, momentum is the momentum factor

    # Download the datasets
    train_dataset = datasets.MNIST(root = '../PytorchCode/dataset/mnist/', train = True, download = True, transform = transform)

    train_loader = DataLoader(train_dataset, shuffle = True, batch_size = batch_size)

    test_dataset = datasets.MNIST(root = '../PytorchCode/dataset/mnist/', train = False, download = True, transform = transform)

    test_loader = DataLoader(test_dataset, shuffle = False, batch_size = batch_size)
    
    # FCNN visualization
    writer = SummaryWriter(comment = 'FCNN')
    writer.add_graph(model, (torch.rand(64, 784), ))
    
    # Training and testing
    for epoch in range(10):
        
        epoch_loss = train(epoch, model, train_loader, criterion, optimizer) # train returns the average per-batch loss
        writer.add_scalar('Train', epoch_loss, epoch) # log the training loss against the epoch number
        
        Accuracy = test(model, test_loader) # returns the accuracy
        writer.add_scalar('Test', Accuracy, epoch) # log the test accuracy against the epoch number

    writer.close() # flush and close the event file

# Run the main function
main()
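
Besides scalars, the same writer can also record per-layer weight histograms each epoch, which helps spot layers whose weights stop updating. A minimal sketch of lines that could be added inside the epoch loop above (the tags simply reuse the parameter names):

# Inside the epoch loop, after training and testing:
for name, param in model.named_parameters():
    writer.add_histogram(name, param.detach().cpu().numpy(), epoch)  # one histogram per parameter tensor per epoch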

Results:

[Figure 4]

[Figure 5]

4--References

Reference link 1
