model.py
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

num_classes = 10

class AlexNet(nn.Module):
    def __init__(self):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2),  # input[3, 224, 224] output[48, 55, 55]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),  # output[48, 27, 27]
            nn.Conv2d(48, 128, kernel_size=5, padding=2),  # output[128, 27, 27]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),  # output[128, 13, 13]
            nn.Conv2d(128, 192, kernel_size=3, padding=1),  # output[192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 192, kernel_size=3, padding=1),  # output[192, 13, 13]
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 128, kernel_size=3, padding=1),  # output[128, 13, 13]
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),  # output[128, 6, 6]
        )
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),  # Dropout randomly zeroes activations (default p=0.5); AlexNet's trick against overfitting
            nn.Linear(128 * 6 * 6, 2048),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(2048, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, num_classes),
        )

    # forward pass
    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, start_dim=1)  # flatten before the fully connected layers
        x = self.classifier(x)
        return x

# writer = SummaryWriter("model_logs")
if __name__ == '__main__':
    model = AlexNet()
    print(model)
    # writer.add_graph(model, torch.rand(1, 3, 224, 224))
    # writer.close()
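A quick sanity check (my own addition, not part of the original files) confirms the 128 * 6 * 6 flatten size the classifier assumes:

import torch
from model import AlexNet

model = AlexNet()
dummy = torch.rand(1, 3, 224, 224)   # one fake RGB image at AlexNet's input resolution
print(model.features(dummy).shape)   # torch.Size([1, 128, 6, 6])
print(model(dummy).shape)            # torch.Size([1, 10]), since num_classes = 10
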
train.py
import os
import sys
import torch
import torchvision
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms
from model import AlexNet
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("using {} device.".format(device))
data_transform = {
    "train": transforms.Compose([transforms.RandomResizedCrop(224),
                                 transforms.RandomHorizontalFlip(),
                                 transforms.ToTensor(),
                                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
    "val": transforms.Compose([transforms.Resize((224, 224)),  # must be (224, 224); Resize(224) would only scale the shorter side
                               transforms.ToTensor(),
                               transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}
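# Note: CIFAR-10 images are only 32x32, so both pipelines upsample them to the
# 224x224 input this AlexNet variant expects. A quick check (illustrative, not
# part of the original script):
#   from PIL import Image
#   import numpy as np
#   dummy = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
#   print(data_transform["val"](dummy).shape)  # torch.Size([3, 224, 224])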
train_dataset = torchvision.datasets.CIFAR10(root='data', train=True, download=True, transform=data_transform["train"])
val_dataset = torchvision.datasets.CIFAR10(root='data', train=False, transform=data_transform["val"])
# {'airplane': 0, 'automobile': 1, 'bird': 2, 'cat': 3, 'deer': 4, 'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9}
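# This mapping is not hard-coded; it can be read off the dataset itself:
#   print(train_dataset.class_to_idx)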
batch_size = 64
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=4, shuffle=False)
train_num = len(train_dataset)
val_num = len(val_dataset)
model = AlexNet()
model.to(device)
loss_function = nn.CrossEntropyLoss()
loss_function = loss_function.to(device)
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
epochs = 200
save_path = 'AlexNet.pth'  # checkpoint file, also read when resuming
train_steps = len(train_loader)
val_steps = len(val_loader)
writer = SummaryWriter("logs")
resume = True  # whether to continue training from the last saved state
if resume and os.path.isfile(save_path):
    print("Resume from checkpoint...")
    checkpoint = torch.load(save_path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    start_epoch = checkpoint['epoch'] + 1  # continue with the epoch after the saved one
    print("====> loaded checkpoint (epoch {})".format(checkpoint['epoch'] + 1))
else:
    print("====> no checkpoint found.")
    start_epoch = 0  # no previous run: start from the first epoch

for epoch in range(start_epoch, epochs):
    # train
    print("-------Epoch {} training starts-------".format(epoch + 1))
    model.train()
    train_acc = 0.0
    running_loss = 0.0
    train_bar = tqdm(train_loader, file=sys.stdout)
    for step, data in enumerate(train_bar):
        images, labels = data
        optimizer.zero_grad()
        outputs = model(images.to(device))
        loss = loss_function(outputs, labels.to(device))
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1, epochs, loss.item())
        _, predict = torch.max(outputs, dim=1)
        train_acc += torch.eq(predict, labels.to(device)).sum().item()
    train_loss = running_loss / train_steps
    train_accurate = train_acc / train_num
    # validate
    model.eval()
    val_acc = 0.0
    running_loss = 0.0
    with torch.no_grad():
        val_bar = tqdm(val_loader, file=sys.stdout)
        for step, val_data in enumerate(val_bar):
            val_images, val_labels = val_data
            outputs = model(val_images.to(device))
            loss = loss_function(outputs, val_labels.to(device))
            running_loss += loss.item()
            _, predict = torch.max(outputs, dim=1)
            val_acc += torch.eq(predict, val_labels.to(device)).sum().item()
    val_loss = running_loss / val_steps
    val_accurate = val_acc / val_num
    print('[epoch %d] train_loss: %.3f  val_loss: %.3f  train_accuracy: %.3f  val_accuracy: %.3f' %
          (epoch + 1, train_loss, val_loss, train_accurate, val_accurate))
    writer.add_scalars('loss', {'train': train_loss, 'val': val_loss}, global_step=epoch)
    writer.add_scalars('acc', {'train': train_accurate, 'val': val_accurate}, global_step=epoch)
    # save a checkpoint every epoch so training can resume later
    checkpoint = {"model_state_dict": model.state_dict(),
                  "optimizer_state_dict": optimizer.state_dict(),
                  "epoch": epoch}
    torch.save(checkpoint, save_path)
    print("Checkpoint saved")
print('Finished Training')
writer.close()
The script supports resuming from a checkpoint, so training can be stopped and continued later; the log files can be viewed with TensorBoard.
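To inspect the curves, point TensorBoard at the log directory used above ("logs") and open the local URL it prints (http://localhost:6006 by default):

tensorboard --logdir=logs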