Worked example: handwritten digit recognition (MNIST)

The script below trains a fully connected network on MNIST with PyTorch, plots the training loss, and reports accuracy on the test set.
import torch
from matplotlib import pyplot as plt
from torchvision import datasets
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.optim as optim
import numpy as np
batch_size = 64
batch_size_test = 100
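# ToTensor scales pixel values to [0, 1]; Normalize uses the MNIST mean/std (0.1307, 0.3081)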
data_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
mnist_train = datasets.MNIST(root='./', train=True, download=True, transform=data_transform)
mnist_test = datasets.MNIST(root='./', train=False, download=True, transform=data_transform)
trainloader = DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
testloader = DataLoader(mnist_test, batch_size=batch_size_test, shuffle=False)
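# Five-layer fully connected classifier: 784 -> 512 -> 256 -> 128 -> 64 -> 10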
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(784, 512)
        self.linear2 = torch.nn.Linear(512, 256)
        self.linear3 = torch.nn.Linear(256, 128)
        self.linear4 = torch.nn.Linear(128, 64)
        self.linear5 = torch.nn.Linear(64, 10)
        self.relu = torch.nn.ReLU()
    def forward(self, x):
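        # flatten each (1, 28, 28) image into a 784-dimensional row vector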
        x = x.view(-1, 784)
        x = self.relu(self.linear1(x))
        x = self.relu(self.linear2(x))
        x = self.relu(self.linear3(x))
        x = self.relu(self.linear4(x))
        return self.linear5(x)
model = Model()
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)
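# CrossEntropyLoss expects raw logits (it applies log-softmax internally), so the model has no final softmax layer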
loss_list = list()
def test_accuracy():
    correct = 0
    total_num = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            pred = model(images)
            # argmax over the 10 class scores gives the predicted digit
            predicted = torch.argmax(pred, dim=1)
            total_num += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(f'Accuracy = {correct / total_num}')
if __name__ == '__main__':
    for epoch in range(10):
        for i, data in enumerate(trainloader, 0):
            inputs, label = data
            outputs = model(inputs)
            optimizer.zero_grad()
            loss = criterion(outputs, label)
            loss_list.append(loss.item())  # store the scalar loss, not the graph-carrying tensor
            loss.backward()
            optimizer.step()
        print(f'[{epoch}]: loss = {loss.item()}')
    plt.plot(loss_list)
    plt.show()
    test_accuracy()
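
As a quick sanity check after training, a minimal sketch (not part of the original post, and assuming `model` and `testloader` are already defined by the script above) for inspecting one test batch could look like:

images, labels = next(iter(testloader))
with torch.no_grad():
    logits = model(images)
print('predicted:', torch.argmax(logits[0]).item(), 'label:', labels[0].item())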
Copyright notice: this is an original article by CSDN blogger 「天一天666」, released under the CC 4.0 BY-SA license; please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/qq_57943526/article/details/128227867
