【Pytorch学习】 -- Custom Dataset & Data Augmentation

Custom Dataset

学习视频
数据集

构建

import os
import pandas as pd
import torch
from torch.utils.data import Dataset
from skimage import io 

class CatsAndDogsDataset(Dataset):
  """Image-classification dataset backed by a CSV annotations file.

  Each CSV row holds (image filename, integer label); the images live
  under ``root_dir`` and are loaded lazily, one at a time, in
  ``__getitem__``.
  """

  def __init__(self, csv_file, root_dir, transform = None):
    # Read the whole annotation table up front; images stay on disk.
    self.annotations = pd.read_csv(csv_file)
    self.root_dir = root_dir
    self.transform = transform

  def __len__(self):
    """Number of annotated samples."""
    return len(self.annotations)

  def __getitem__(self, index):
    """Load the (image, label) pair for annotation row ``index``."""
    row = self.annotations.iloc[index]
    img_path = os.path.join(self.root_dir, row.iloc[0])
    image = io.imread(img_path)
    y_label = torch.tensor(int(row.iloc[1]))

    # Apply the optional transform (e.g. augmentation / ToTensor).
    if self.transform is not None:
      image = self.transform(image)

    return image, y_label

使用

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision
from torch.utils.data import DataLoader
from customDataset import CatsAndDogsDataset

# Set device — fall back to CPU when CUDA is unavailable.
# (fixed: was misspelled `torch.decive`)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyperparameters
in_channel = 3
num_classes = 10
learning_rate = 1e-3
batch_size = 32
num_epochs = 1

# Load Data.
# Note: the dataset constructor's keyword is `transform` (singular),
# matching CatsAndDogsDataset.__init__ — `transforms=` raises TypeError.
dataset = CatsAndDogsDataset(csv_file = 'cats_dogs.csv', root_dir = 'cats_dogs_resized',
                transform = transforms.ToTensor())

# 20000 train / 5000 test — requires the dataset to hold exactly 25000 rows.
train_set, test_set = torch.utils.data.random_split(dataset, [20000, 5000])
train_loader = DataLoader(dataset = train_set, batch_size = batch_size, shuffle = True)
test_loader = DataLoader(dataset = test_set, batch_size = batch_size, shuffle = True)

# Model: pretrained GoogLeNet fine-tuned on this dataset.
# (fixed: `pretrained = Ture` -> `True`)
model = torchvision.models.googlenet(pretrained = True)
model.to(device)

# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr = learning_rate)

# Train Network
for epoch in range(num_epochs):
  losses = []

  for batch_idx, (data, targets) in enumerate(train_loader):
    # Move the batch to the compute device.
    # (fixed: keyword was misspelled `decive` in both calls)
    data = data.to(device = device)
    targets = targets.to(device = device)

    # Forward
    scores = model(data)
    loss = criterion(scores, targets)

    losses.append(loss.item())

    # Backward
    optimizer.zero_grad()
    loss.backward()

    # adam step
    optimizer.step()

  print(f'Cost at epoch {epoch} is {sum(losses)/len(losses)}')

def check_accuracy(loader, model):
  """Print the model's classification accuracy over ``loader``.

  Temporarily puts ``model`` in eval mode and disables gradient
  tracking, then restores train mode before returning. Relies on the
  module-level ``device``.
  """
  num_correct = 0
  num_samples = 0
  model.eval()

  with torch.no_grad():
    for x, y in loader:
      # Move the batch to the compute device.
      # (fixed: keyword was misspelled `decive` on the y tensor)
      x = x.to(device = device)
      y = y.to(device = device)

      scores = model(x)
      # Predicted class = index of the max logit along the class axis.
      _, predictions = scores.max(1)
      num_correct += (predictions == y).sum()
      num_samples += predictions.size(0)

    print(
      f"Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}"
    )

  model.train()

print("Checking accuracy on Training Set")
check_accuracy(train_loader, model)

print("Checking accuracy on Test Set")
check_accuracy(test_loader, model)

Data Augmentation

import torch
import torchvision.transforms as transforms
from torchvision.transforms.transforms import ColorJitter, RandomGrayscale, RandomHorizontalFlip, RandomVerticalFlip
from torchvision.utils import save_image
from customDataset import CatsAndDogsDataset

# Augmentation pipeline — transforms.Compose applies the steps in order.
my_transforms = transforms.Compose([
  transforms.ToPILImage(),                  # ndarray (skimage output) -> PIL image
  transforms.Resize((256, 256)),
  transforms.RandomCrop((224, 224)),
  transforms.ColorJitter(brightness = 0.5),
  transforms.RandomRotation(degrees = 45),
  transforms.RandomHorizontalFlip(p = 0.5),
  transforms.RandomVerticalFlip(p = 0.05),
  transforms.RandomGrayscale(p = 0.2),
  transforms.ToTensor(),
  transforms.Normalize(mean = [0.0, 0.0, 0.0], std = [1.0, 1.0, 1.0]) # (value - mean) / std — identity here
])

# Note: the dataset constructor's keyword is `transform` (singular),
# matching CatsAndDogsDataset.__init__ — `transforms=` raises TypeError.
dataset = CatsAndDogsDataset(csv_file = 'cats_dogs.csv', root_dir = 'cats_dogs_resized',
                transform = my_transforms)

# Save 10 randomly augmented variants of every image to disk.
img_num = 0
for _ in range(10):
  for img, label in dataset:
    save_image(img, 'img' + str(img_num) + '.png')
    img_num += 1

ps: transforms.Compose 中的操作按顺序进行操作

你可能感兴趣的:(pytorch,深度学习,学习)