from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
cudnn.benchmark = True
plt.ion() # interactive mode
print(torch.__version__)
1.11.0
# Data augmentation (random resized crop + horizontal flip) for training;
# deterministic resize + center crop for validation.
# Note: ToTensor() only scales pixels to [0, 1]; no per-channel
# normalization (transforms.Normalize) is applied in this pipeline.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor()
    ]),
}
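If per-channel normalization is wanted, it can be appended after ToTensor(). A minimal sketch; the ImageNet means/stds below are an assumed default, not statistics computed from this dataset:

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
# Compose stores its steps in .transforms, so we can extend each pipeline.
data_transforms_normalized = {
    split: transforms.Compose(pipeline.transforms + [normalize])
    for split, pipeline in data_transforms.items()
}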
data_dir = '/kaggle/input/data-cat'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=64,
                                              shuffle=True, num_workers=4)
               for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
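Before training, a quick sanity check of the data pipeline (just a sketch; the expected batch shape follows from batch_size=64 and the 224x224 crops above):

# Sanity-check the pipeline: class names, split sizes, one batch.
print(class_names)    # two classes here, per the final Linear(512, 2) below
print(dataset_sizes)
inputs, labels = next(iter(dataloaders['train']))
print(inputs.shape, labels.shape)  # torch.Size([64, 3, 224, 224]) torch.Size([64])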
Writing the training loop
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print(f'Epoch {epoch}/{num_epochs - 1}')
        print('-' * 10)
        # Each epoch has a training and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history only if in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')
            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print(f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s')
    print(f'Best val Acc: {best_acc:4f}')
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
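Once training has finished, a small helper like the following can spot-check predictions on one validation batch (a sketch; evaluate_samples is a name introduced here, not part of the original notebook):

def evaluate_samples(model, n=8):
    # Run the model on one validation batch and compare
    # predictions against the ground-truth labels.
    model.eval()
    inputs, labels = next(iter(dataloaders['val']))
    inputs, labels = inputs.to(device), labels.to(device)
    with torch.no_grad():
        preds = model(inputs).argmax(dim=1)
    for p, t in zip(preds[:n], labels[:n]):
        print(f'pred: {class_names[p]:>8}  true: {class_names[t]:>8}')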
Building the model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Five conv blocks; each MaxPool halves the spatial size,
        # so a 224x224 input ends up as a 7x7 map with 512 channels.
        self.conv = nn.Sequential(
            nn.Conv2d(3, 32, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 128, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(256, 512, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # Global 7x7 average pool -> 512-d vector -> 2-way classifier.
        self.fc = nn.Sequential(
            nn.AvgPool2d(7, 7),
            nn.Flatten(),
            nn.Dropout(0.5),
            nn.Linear(512, 2),
        )

    def forward(self, img):
        x = self.conv(img)
        x = self.fc(x)
        return x
model = Net().to(device)
print(model)
Net(
  (conv): Sequential(
    (0): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (4): ReLU()
    (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU()
    (8): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (9): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (10): ReLU()
    (11): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (12): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (13): ReLU()
    (14): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (fc): Sequential(
    (0): AvgPool2d(kernel_size=7, stride=7, padding=0)
    (1): Flatten(start_dim=1, end_dim=-1)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=512, out_features=2, bias=True)
  )
)
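The parameter count can also be checked directly, without torchsummary:

# Count trainable parameters by hand; should match torchsummary's total.
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'{n_params:,} trainable parameters')  # 1,569,602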
!pip install torchsummary
from torchsummary import summary
Requirement already satisfied: torchsummary in /opt/conda/lib/python3.7/site-packages (1.5.1)
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
summary(model, (3, 224, 224))
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 32, 224, 224]             896
              ReLU-2         [-1, 32, 224, 224]               0
         MaxPool2d-3         [-1, 32, 112, 112]               0
            Conv2d-4         [-1, 64, 112, 112]          18,496
              ReLU-5         [-1, 64, 112, 112]               0
         MaxPool2d-6           [-1, 64, 56, 56]               0
            Conv2d-7          [-1, 128, 56, 56]          73,856
              ReLU-8          [-1, 128, 56, 56]               0
         MaxPool2d-9          [-1, 128, 28, 28]               0
           Conv2d-10          [-1, 256, 28, 28]         295,168
             ReLU-11          [-1, 256, 28, 28]               0
        MaxPool2d-12          [-1, 256, 14, 14]               0
           Conv2d-13          [-1, 512, 14, 14]       1,180,160
             ReLU-14          [-1, 512, 14, 14]               0
        MaxPool2d-15            [-1, 512, 7, 7]               0
        AvgPool2d-16            [-1, 512, 1, 1]               0
          Flatten-17                  [-1, 512]               0
          Dropout-18                  [-1, 512]               0
           Linear-19                    [-1, 2]           1,026
================================================================
Total params: 1,569,602
Trainable params: 1,569,602
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 53.41
Params size (MB): 5.99
Estimated Total Size (MB): 59.98
----------------------------------------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.1)
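With step_size=15 and gamma=0.1, and scheduler.step() called once per epoch inside train_model, Adam's learning rate (default 1e-3) is cut by 10x going into epochs 15 and 30. A throwaway sketch of the decay, using a separate optimizer/scheduler pair so the real ones above are untouched:

# Illustrate the StepLR schedule on a dummy optimizer.
demo_opt = optim.Adam(model.parameters())          # default lr = 1e-3
demo_sched = lr_scheduler.StepLR(demo_opt, step_size=15, gamma=0.1)
for epoch in range(35):
    demo_sched.step()                              # one step per "epoch"
    if epoch in (13, 14, 28, 29):
        print(f'after epoch {epoch}: lr = {demo_sched.get_last_lr()[0]:g}')
# after epoch 13: lr = 0.001
# after epoch 14: lr = 0.0001
# after epoch 28: lr = 0.0001
# after epoch 29: lr = 1e-05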
model = train_model(model, criterion, optimizer, exp_lr_scheduler,
                    num_epochs=35)
Epoch 0/34
----------
train Loss: 0.6885 Acc: 0.5350
val Loss: 0.6815 Acc: 0.5524

Epoch 1/34
----------
train Loss: 0.6874 Acc: 0.5482
val Loss: 0.6795 Acc: 0.5542

Epoch 2/34
----------
train Loss: 0.6757 Acc: 0.5711
val Loss: 0.6735 Acc: 0.5624

Epoch 3/34
----------
train Loss: 0.6432 Acc: 0.6374
val Loss: 0.5917 Acc: 0.6886

Epoch 4/34
----------
train Loss: 0.6248 Acc: 0.6576
val Loss: 0.5702 Acc: 0.7214
···
Epoch 30/34
----------
train Loss: 0.3251 Acc: 0.8559
val Loss: 0.2581 Acc: 0.8928

Epoch 31/34
----------
train Loss: 0.3234 Acc: 0.8535
val Loss: 0.2584 Acc: 0.8932

Epoch 32/34
----------
train Loss: 0.3234 Acc: 0.8542
val Loss: 0.2571 Acc: 0.8946

Epoch 33/34
----------
train Loss: 0.3202 Acc: 0.8538
val Loss: 0.2589 Acc: 0.8928

Epoch 34/34
----------
train Loss: 0.3284 Acc: 0.8526
val Loss: 0.2574 Acc: 0.8946

Training complete in 63m 43s
Best val Acc: 0.894600
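Since train_model already loads the best-validation weights back into model before returning, persisting them takes one line (the filename is an arbitrary choice, not from the original notebook):

# Persist the best-validation weights; the path is an arbitrary choice.
torch.save(model.state_dict(), 'cat_cnn_best.pth')

# Later, restore into a fresh instance of the same architecture.
restored = Net().to(device)
restored.load_state_dict(torch.load('cat_cnn_best.pth', map_location=device))
restored.eval()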