Datawhale组队学习_Pytorch基础实战FashionMNIST时装分类学习笔记

本文为学习Datawhale 2021.10组队学习深入浅出Pytorch笔记
原学习文档地址:https://github.com/datawhalechina/thorough-pytorch

import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torchvision import datasets
import matplotlib.pyplot as plt

# Select GPU 0 if CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

batch_size = 8
num_workers = 0  # Windows limitation: multi-process data loading can hang, so use the main process
lr = 1e-4
epochs = 2

# load data
image_size = 28  # the model may require a specific input image size

# Use the transforms package to preprocess the images
data_transform = transforms.Compose([
    # transforms.ToPILImage(),  # not needed for data downloaded via torchvision
    transforms.Resize(image_size),
    transforms.ToTensor()
])

# Option 1: download FashionMNIST directly via torchvision
train_data = datasets.FashionMNIST(root='./', train=True, download=True, transform=data_transform)
test_data = datasets.FashionMNIST(root='./', train=False, download=True, transform=data_transform)

## Option 2: read the data from CSV files and build a custom Dataset class
# class FMDataset(Dataset):
#     def __init__(self, df, transform=None):
#         self.df = df
#         self.transform = transform
#         self.images = df.iloc[:, 1:].values.astype(np.uint8)
#         self.labels = df.iloc[:, 0].values
#
#     def __len__(self):
#         return len(self.images)
#
#     def __getitem__(self, idx):
#         image = self.images[idx].reshape(28, 28, 1)
#         label = int(self.labels[idx])
#         if self.transform is not None:
#             image = self.transform(image)
#         else:
#             image = torch.tensor(image / 255., dtype=torch.float)
#         label = torch.tensor(label, dtype=torch.long)
#         return image, label
#
#
# train_df = pd.read_csv("./FashionMNIST/fashion-mnist_train.csv")
# test_df = pd.read_csv("./FashionMNIST/fashion-mnist_test.csv")
# train_data = FMDataset(train_df, data_transform)
# test_data = FMDataset(test_df, data_transform)

# Dataloader: shuffle training batches each epoch; drop_last discards the
# final smaller batch during training. The test loader keeps a fixed order.
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers,drop_last=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False,num_workers=num_workers)

# Visualization: peek at one batch and display the first image in grayscale.
image, label = next(iter(train_loader))
print(image.shape, label.shape)
plt.imshow(image[0][0], cmap="gray")  # NOTE(review): outside a notebook, plt.show() may be needed to render

# model
class Net(nn.Module):
    """Small CNN classifier for 1x28x28 grayscale FashionMNIST images.

    Two conv -> ReLU -> max-pool -> dropout stages produce a 64x4x4
    feature map, which a two-layer fully connected head maps to 10
    class logits (no softmax — CrossEntropyLoss expects raw logits).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: 1x28x28 -> 32x12x12 -> 64x4x4
        self.conv = nn.Sequential(
            nn.Conv2d(1, 32, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Dropout(0.3),
            nn.Conv2d(32, 64, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Dropout(0.3),
        )
        # Classifier head: flattened features -> 512 -> 10 logits
        self.fc = nn.Sequential(
            nn.Linear(64 * 4 * 4, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        features = self.conv(x)
        flat = features.view(-1, 64 * 4 * 4)
        return self.fc(flat)

# Instantiate the model and move it to the selected device. Using
# `.to(device)` instead of `.cuda()` keeps the script runnable on
# CPU-only machines (the `device` fallback defined above is otherwise
# never used).
model = Net()
model = model.to(device)

# PyTorch automatically handles integer class labels for CE loss.
# Labels must start at 0, and the model must NOT end with a softmax
# layer (CrossEntropyLoss already incorporates log-softmax and works on
# raw logits) — the parts of a PyTorch training setup are not
# independent and must be considered together.
criterion = nn.CrossEntropyLoss()
# Use the learning rate configured at the top of the script (lr = 1e-4)
# rather than a hard-coded 0.001, keeping hyperparameters consistent.
optimizer = optim.Adam(model.parameters(), lr=lr)

#  train and val 
def train(epoch):
    """Run one training epoch over train_loader.

    Args:
        epoch: epoch number, used only for the progress printout.

    Prints the average per-sample training loss for the epoch.
    """
    model.train()  # enable dropout for training
    train_loss = 0
    for data, label in train_loader:
        # Move the batch to the same device as the model. `.to(device)`
        # works on both CPU-only and CUDA machines, whereas the original
        # `.cuda()` call raises when no GPU is available.
        data, label = data.to(device), label.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        # loss.item() is the batch mean; weight by batch size so the
        # epoch average is exact even if batch sizes vary.
        train_loss += loss.item()*data.size(0)
    train_loss = train_loss/len(train_loader.dataset)
    print('Epoch: {} \tTraining Loss: {:.6f}'.format(epoch, train_loss))

def val(epoch):
    """Evaluate the model on test_loader.

    Args:
        epoch: epoch number, used only for the progress printout.

    Prints the average per-sample validation loss and the accuracy.
    """
    model.eval()  # disable dropout for evaluation
    val_loss = 0
    gt_labels = []
    pred_labels = []
    with torch.no_grad():  # no gradients needed during evaluation
        for data, label in test_loader:
            # `.to(device)` instead of `.cuda()` so evaluation also works
            # on CPU-only machines.
            data, label = data.to(device), label.to(device)
            output = model(data)
            preds = torch.argmax(output, 1)
            gt_labels.append(label.cpu().data.numpy())
            pred_labels.append(preds.cpu().data.numpy())
            loss = criterion(output, label)
            val_loss += loss.item()*data.size(0)
    val_loss = val_loss/len(test_loader.dataset)
    gt_labels, pred_labels = np.concatenate(gt_labels), np.concatenate(pred_labels)
    acc = np.sum(gt_labels==pred_labels)/len(pred_labels)
    # Fixed format spec: '{:6f}' (width 6) was intended to be '{:.6f}'
    # (6 decimal places), matching the loss formatting.
    print('Epoch: {} \tValidation Loss: {:.6f}, Accuracy: {:.6f}'.format(epoch, val_loss, acc))

# Run training and validation for the configured number of epochs.
for epoch in range(1, epochs+1):
    train(epoch)
    val(epoch)

# save model
# NOTE(review): this pickles the entire model object; saving
# model.state_dict() is the more portable PyTorch convention — verify
# how the file is loaded before changing.
save_path = "./FashionModel.pkl"
torch.save(model, save_path)

遇到了一个环境问题

OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
OMP: Hint This means that multiple copies of the OpenMP runtime have been linked into the program. That is dangerous, since it can degrade performance or cause incorrect results. The best thing to do is to ensure that only a single OpenMP runtime is linked into the process, e.g. by avoiding static linking of the OpenMP runtime in any library. As an unsafe, unsupported, undocumented workaround you can set the environment variable KMP_DUPLICATE_LIB_OK=TRUE to allow the program to continue to execute, but that may cause crashes or silently produce incorrect results. For more information, please see http://www.intel.com/software/products/support/.
Process finished with exit code 3

按照这个链接解决的

https://zhuanlan.zhihu.com/p/371649016

你可能感兴趣的:(Datawhale组队学习,pytorch,分类,深度学习)