Brief steps for building a network in PyTorch

Contents

  • I. Loading the dataset
  • II. Building the model
    • 1. Subclassing torch.nn.Module
    • 2. Using the torch.nn.Sequential container
    • 3. Using an existing pretrained network
  • III. Configuring the model
  • IV. Training the model
  • References

I. Loading the dataset

Use torchvision's ImageFolder together with DataLoader:

import torch
from torchvision.datasets import ImageFolder
from torchvision import transforms
from torch.utils.data import DataLoader

BATCH_SIZE = 32  # example batch size

data_transform = transforms.Compose([
    transforms.ToTensor(),  # converts a PIL.Image or numpy.ndarray to a torch.FloatTensor
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    transforms.ConvertImageDtype(torch.float)  # a no-op here, since ToTensor already yields float
])
dataset = ImageFolder("YOUR IMAGE DIRECTORY", transform=data_transform)
train_loader = DataLoader(dataset=dataset, batch_size=BATCH_SIZE, shuffle=True)
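
Once the loader exists, batches come out of a plain for loop. A minimal sketch, continuing from the code above (ImageFolder derives the class labels from the subdirectory names):

for images, labels in train_loader:
    # images: FloatTensor of shape [BATCH_SIZE, 3, H, W]
    # labels: LongTensor of class indices assigned by ImageFolder
    print(images.shape, labels.shape)
    break  # inspect only the first batch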


II. Building the model

1. Subclassing torch.nn.Module

Subclass Module and override the forward function.
Example:

import torch
import torch.nn as nn

class net(nn.Module):
    def __init__(self, in_size, out_size):
        super(net, self).__init__()
        self.conv1 = nn.Conv2d(in_size, out_size, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(out_size, out_size, kernel_size=3, padding=1)
        self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, inputs1, inputs2):
        # upsample inputs2, then concatenate with inputs1 along the channel dim
        outputs = torch.cat([inputs1, self.up(inputs2)], 1)
        outputs = self.conv1(outputs)
        outputs = self.relu(outputs)
        outputs = self.conv2(outputs)
        outputs = self.relu(outputs)
        return outputs
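
This particular module is the up-sampling block of a UNet: it upsamples the deeper feature map and fuses it with the skip connection. A quick smoke test with made-up shapes, continuing from the block above (the channel counts and sizes are arbitrary):

block = net(in_size=96, out_size=64)  # 64 + 32 = 96 channels after the concat
skip = torch.randn(1, 64, 56, 56)     # skip connection from the encoder
deep = torch.randn(1, 32, 28, 28)     # deeper feature map at half resolution
out = block(skip, deep)
print(out.shape)                      # torch.Size([1, 64, 56, 56])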

2. Using the torch.nn.Sequential container

Example:

import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(1, 20, 5),
    nn.ReLU(),
    nn.Conv2d(20, 64, 5),
    nn.ReLU()
)
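
nn.Sequential also accepts an OrderedDict, which gives each layer a readable name; running a dummy input through it confirms the shapes:

from collections import OrderedDict
import torch
import torch.nn as nn

model = nn.Sequential(OrderedDict([
    ('conv1', nn.Conv2d(1, 20, 5)),
    ('relu1', nn.ReLU()),
    ('conv2', nn.Conv2d(20, 64, 5)),
    ('relu2', nn.ReLU())
]))
out = model(torch.randn(1, 1, 28, 28))  # e.g. an MNIST-sized input
print(out.shape)                        # torch.Size([1, 64, 20, 20])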

3. Using an existing pretrained network

import torchvision.models as models
resnet18 = models.resnet18(pretrained=True)
alexnet = models.alexnet(pretrained=True)
squeezenet = models.squeezenet1_0(pretrained=True)
vgg16 = models.vgg16(pretrained=True)
densenet = models.densenet161(pretrained=True)
inception = models.inception_v3(pretrained=True)
googlenet = models.googlenet(pretrained=True)
shufflenet = models.shufflenet_v2_x1_0(pretrained=True)
mobilenet_v2 = models.mobilenet_v2(pretrained=True)
mobilenet_v3_large = models.mobilenet_v3_large(pretrained=True)
mobilenet_v3_small = models.mobilenet_v3_small(pretrained=True)
resnext50_32x4d = models.resnext50_32x4d(pretrained=True)
wide_resnet50_2 = models.wide_resnet50_2(pretrained=True)
mnasnet = models.mnasnet1_0(pretrained=True)
efficientnet_b0 = models.efficientnet_b0(pretrained=True)
efficientnet_b1 = models.efficientnet_b1(pretrained=True)
efficientnet_b2 = models.efficientnet_b2(pretrained=True)
efficientnet_b3 = models.efficientnet_b3(pretrained=True)
efficientnet_b4 = models.efficientnet_b4(pretrained=True)
efficientnet_b5 = models.efficientnet_b5(pretrained=True)
efficientnet_b6 = models.efficientnet_b6(pretrained=True)
efficientnet_b7 = models.efficientnet_b7(pretrained=True)
regnet_y_400mf = models.regnet_y_400mf(pretrained=True)
regnet_y_800mf = models.regnet_y_800mf(pretrained=True)
regnet_y_1_6gf = models.regnet_y_1_6gf(pretrained=True)
regnet_y_3_2gf = models.regnet_y_3_2gf(pretrained=True)
regnet_y_8gf = models.regnet_y_8gf(pretrained=True)
regnet_y_16gf = models.regnet_y_16gf(pretrained=True)
regnet_y_32gf = models.regnet_y_32gf(pretrained=True)
regnet_x_400mf = models.regnet_x_400mf(pretrained=True)
regnet_x_800mf = models.regnet_x_800mf(pretrained=True)
regnet_x_1_6gf = models.regnet_x_1_6gf(pretrained=True)
regnet_x_3_2gf = models.regnet_x_3_2gf(pretrained=True)
regnet_x_8gf = models.regnet_x_8gf(pretrained=True)
regnet_x_16gf = models.regnet_x_16gf(pretrained=True)
regnet_x_32gf = models.regnet_x_32gf(pretrained=True)
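
A common next step is transfer learning: freeze the pretrained backbone and swap in a new classification head. A sketch for resnet18, where NUM_CLASSES is a placeholder for your own class count (note that newer torchvision versions replace pretrained=True with a weights argument, e.g. models.resnet18(weights=models.ResNet18_Weights.DEFAULT)):

import torch.nn as nn
import torchvision.models as models

NUM_CLASSES = 10  # placeholder: the number of classes in your dataset

resnet18 = models.resnet18(pretrained=True)
for param in resnet18.parameters():
    param.requires_grad = False  # freeze the pretrained weights
# replace the final fully connected layer with a new, trainable head
resnet18.fc = nn.Linear(resnet18.fc.in_features, NUM_CLASSES)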


III. Configuring the model

Configuration:

import torch
import torch.backends.cudnn as cudnn

# run on multiple GPUs in parallel
device_ids = [0, 1]
net = torch.nn.DataParallel(net, device_ids=device_ids)
# let cuDNN benchmark and cache the fastest convolution algorithms
cudnn.benchmark = True
# move the model to the GPU
net = net.cuda()

Loss function and optimizer:

import torch.nn as nn
import torch.optim as optim

optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)  # learning rate 0.01
criterion = nn.CrossEntropyLoss()
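
The optimizer is often paired with a learning-rate scheduler; a minimal sketch using StepLR (the step size and decay factor here are arbitrary):

from torch.optim.lr_scheduler import StepLR

scheduler = StepLR(optimizer, step_size=30, gamma=0.1)  # multiply lr by 0.1 every 30 epochs
# call scheduler.step() once per epoch, after optimizer.step()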

Built-in optimizers (a construction sketch follows the list):

Adadelta: implements the Adadelta algorithm.

Adagrad: implements the Adagrad algorithm.

Adam: implements the Adam algorithm.

AdamW: implements the AdamW algorithm.

SparseAdam: implements a lazy version of the Adam algorithm, suited to sparse tensors.

Adamax: implements the Adamax algorithm (a variant of Adam based on the infinity norm).

ASGD: implements averaged stochastic gradient descent.

LBFGS: implements the L-BFGS algorithm, modeled on minFunc.

NAdam: implements the NAdam algorithm.

RAdam: implements the RAdam algorithm.

RMSprop: implements the RMSprop algorithm.

Rprop: implements the resilient backpropagation algorithm.

SGD: implements stochastic gradient descent (optionally with momentum).
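
Swapping optimizers only changes the constructor call; the hyperparameters below are illustrative:

import torch.optim as optim

optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4)
# or: optim.AdamW(model.parameters(), lr=1e-3)
# or: optim.RMSprop(model.parameters(), lr=1e-2, alpha=0.99)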

IV. Training the model

Training
A simple example:

for input, target in dataset:
    optimizer.zero_grad()           # clear gradients from the previous step
    output = model(input)           # forward pass
    loss = loss_fn(output, target)  # compute the loss
    loss.backward()                 # backpropagate
    optimizer.step()                # update the weights
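
During or after training you would normally switch to evaluation mode and disable gradient tracking. A minimal sketch, assuming a val_loader built the same way as the train loader above:

import torch

model.eval()           # switch off dropout, use running batch-norm statistics
correct, total = 0, 0
with torch.no_grad():  # no gradients needed for evaluation
    for images, labels in val_loader:
        outputs = model(images)
        predicted = outputs.argmax(dim=1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(f"accuracy: {correct / total:.4f}")
model.train()          # back to training mode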

A training routine from a UNet project:

for epoch in range(epochs):
    total_loss = 0.0
    for iteration, batch in enumerate(trainloader, 0):
        if iteration >= epoch_size:
            break
        imgs, pngs, labels = batch
        with torch.no_grad():
            # this project's Dataset yields numpy arrays, so convert to tensors
            imgs = torch.from_numpy(imgs).type(torch.FloatTensor)
            pngs = torch.from_numpy(pngs).type(torch.FloatTensor).long()
            labels = torch.from_numpy(labels).type(torch.FloatTensor)
            # move everything to the GPU
            imgs = imgs.cuda()
            pngs = pngs.cuda()
            labels = labels.cuda()

        optimizer.zero_grad()
        outputs = net(imgs)
        loss = CE_Loss(outputs, pngs, num_classes=NUM_CLASSES)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

Saving the model:

# save both the model structure and the parameters
torch.save(net, 'net.pkl')
# save only the model parameters (the state_dict)
torch.save(net.state_dict(), 'net_params.pkl')
# export to ONNX
dummy_input = torch.randn(BATCH_SIZE, 1, 28, 28, device='cuda')  # example network input
input_names = ["inputs"]
output_names = ["outputs"]
torch.onnx.export(net, dummy_input, "net.onnx", export_params=True, verbose=True,
                  input_names=input_names, output_names=output_names)
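
Loading mirrors the two save styles. A minimal sketch: MyModel is a hypothetical stand-in for whatever architecture produced the checkpoint, and loading the full object requires its class definition to be importable:

# load the full model object (the class definition must be importable)
net = torch.load('net.pkl')
# or: load parameters into a freshly built model of the same architecture
net = MyModel()  # hypothetical constructor for your architecture
net.load_state_dict(torch.load('net_params.pkl'))
net.eval()       # set to inference mode before predicting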

TensorBoard logging
Example:

from torch.utils.tensorboard import SummaryWriter
import numpy as np

writer = SummaryWriter()

for n_iter in range(100):
    writer.add_scalar('Loss/train', np.random.random(), n_iter)
    writer.add_scalar('Loss/test', np.random.random(), n_iter)
    writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
    writer.add_scalar('Accuracy/test', np.random.random(), n_iter)
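
Besides scalars, the same writer can record the model graph and sample images; the model and images names below are assumed to come from the earlier sections. By default SummaryWriter writes to ./runs, so the logs can be viewed with tensorboard --logdir=runs:

writer.add_graph(model, images)            # record the network structure
writer.add_image('samples', images[0], 0)  # log one input image
writer.close()                             # flush and close the event file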


References

https://pytorch.org/docs/stable/index.html
