import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# Convert images to tensors and normalize with the standard MNIST mean and std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

train_data = datasets.MNIST(root='.', train=True, download=True, transform=transform)
test_data = datasets.MNIST(root='.', train=False, download=True, transform=transform)

train_loader = DataLoader(dataset=train_data, batch_size=64, shuffle=True, num_workers=2)
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=False, num_workers=2)
class ConvModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.maxpooling = torch.nn.MaxPool2d(kernel_size=2)
        # After two conv + pool stages a 28x28 input becomes 20 channels of 4x4,
        # i.e. 20 * 4 * 4 = 320 features.
        self.linear = torch.nn.Linear(320, 10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        batch_size = x.size(0)
        x = self.relu(self.maxpooling(self.conv1(x)))
        x = self.relu(self.maxpooling(self.conv2(x)))
        x = x.view(batch_size, -1)  # flatten to (batch_size, 320)
        return self.linear(x)
model = ConvModel()
criterion = torch.nn.CrossEntropyLoss()  # expects raw logits, so the model applies no softmax
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)
if __name__ == '__main__':
    for epoch in range(10):
        running_loss = 0.0  # avoid shadowing the built-in sum()
        for inputs, labels in train_loader:
            pred = model(inputs)
            loss = criterion(pred, labels)
            running_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Average over the number of batches, not the batch size.
        print(f"epoch={epoch}, loss={running_loss / len(train_loader)}")