线性卷积和有关手写识别例题的提升

手写数字识别例题:

import torch
from matplotlib import pyplot as plt
from torchvision import datasets
from torch.utils.data import DataLoader  # packages that must be installed (torch / torchvision)
from torchvision import transforms
import torch.optim as optim
import numpy as np

batch_size = 64        # training mini-batch size
batch_size_test = 100  # evaluation mini-batch size (MNIST test set = 10000 = 100 * 100)
# Convert PIL images to tensors, then normalize with the standard MNIST mean/std.
data_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

# NOTE(review): "minist_tainloader" is a typo for "mnist_trainloader", and these
# two names are Datasets, not loaders — the actual DataLoaders are built below.
minist_tainloader = datasets.MNIST(root='./', train=True, download=True, transform=data_transform)
minist_testloader = datasets.MNIST(root='./', train=False, download=True, transform=data_transform)

# Shuffle only the training data; keep test order deterministic.
trainloader = DataLoader(minist_tainloader, batch_size=batch_size, shuffle=True)
testloader = DataLoader(minist_testloader, batch_size=batch_size_test, shuffle=False)


class Model(torch.nn.Module):
    """Fully-connected MNIST classifier: 784 -> 512 -> 256 -> 128 -> 64 -> 10 logits.

    The last layer returns raw logits (no softmax) because CrossEntropyLoss
    applies log-softmax internally.
    """

    def __init__(self):
        super(Model, self).__init__()
        # Progressively narrowing hidden layers, all sharing one ReLU module.
        self.linear1 = torch.nn.Linear(784, 512)
        self.linear2 = torch.nn.Linear(512, 256)
        self.linear3 = torch.nn.Linear(256, 128)
        self.linear4 = torch.nn.Linear(128, 64)
        self.linear5 = torch.nn.Linear(64, 10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        """Flatten a batch of 1x28x28 images and return (batch, 10) logits."""
        hidden = x.view(-1, 784)
        # Apply every hidden layer followed by ReLU; the output layer stays linear.
        for layer in (self.linear1, self.linear2, self.linear3, self.linear4):
            hidden = self.relu(layer(hidden))
        return self.linear5(hidden)

model = Model()
# CrossEntropyLoss expects raw logits, which is why Model has no final softmax.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)
# Per-iteration training losses, collected for plotting after training.
loss_list = list()

def test_accuracy():
    """Evaluate `model` on the whole test set and print the overall accuracy.

    Fixes in this version:
    - The original reset `correct`/`total_num` inside the batch loop, so it only
      ever printed per-batch accuracy (100 separate prints), never the total.
    - The original indexed `range(batch_size_test)`, which raises IndexError
      whenever the final batch is smaller than `batch_size_test`.
    - Uses `torch.argmax` instead of converting tensors to Python lists and
      scanning them element by element.
    """
    correct = 0
    total = 0
    with torch.no_grad():  # inference only — no gradients needed
        for images, labels in testloader:
            logits = model(images)
            predicted = torch.argmax(logits, dim=1)
            total += labels.size(0)  # robust to a short final batch
            correct += (predicted == labels).sum().item()
    print(f'Accuracy = {correct / total}')


if __name__ == '__main__':
    for epoch in range(10):
        for i, data in enumerate(trainloader, 0):
            inputs, label = data
            outputs = model(inputs)

            optimizer.zero_grad()
            loss = criterion(outputs, label)
            # Store the Python float, not the tensor: appending `loss` itself
            # keeps the whole autograd graph alive (memory leak) and makes
            # plt.plot fail on tensors that require grad.
            loss_list.append(loss.item())
            loss.backward()

            optimizer.step()
        # Report the last mini-batch loss of the epoch.
        print(f'[{epoch}]: loss = {loss.item()}')

    plt.plot(loss_list)
    plt.show()

    test_accuracy()
通过PIL识别图像

import numpy as np
from PIL import Image

# Demo: load an image, convert it to grayscale, and compare array shapes.
a = Image.open('test.jpg')
# 'L' mode = single-channel 8-bit grayscale.
c = a.convert('L')
c.show()
# print(c)
im = np.array(a)
im_gray = np.array(c)
print(im_gray.shape)  # (H, W) — grayscale drops the channel axis
print(im_gray)
print(im.shape)  # presumably (H, W, 3) for an RGB JPEG — depends on the source file
# print(im)
# Hand-built 3-D array of shape (2, 3, 3), for comparing .shape output.
b = np.array([[[1,2,3],[2,3,3],[3,4,5]],[[2,1,2],[3,4,5],[4,5,6]]])
# print(b.shape)
# a.show()
# print(a)

卷积函数:

import torch
from torchvision import datasets,transforms
from torch.utils.data import DataLoader
# ToTensor + normalization with the standard MNIST mean/std.
transform=transforms.Compose([transforms.ToTensor(),
            transforms.Normalize((0.1307,),(0.3081,))])

# NOTE(review): download=False for the test split assumes the data already
# exists under '.' (e.g. fetched by the train-split call) — verify locally.
train_data=datasets.MNIST(root='.',download=True,transform=transform)
test_data=datasets.MNIST(root='.',download=False,transform=transform)

# Shuffle training data only; 2 worker processes for loading.
trainLoader=DataLoader(dataset=train_data,batch_size=64,shuffle=True,num_workers=2)
testLoader=DataLoader(dataset=test_data,batch_size=64,shuffle=False,num_workers=2)

class ConvModel(torch.nn.Module):
    """MNIST CNN: two conv+maxpool+ReLU stages, then one linear layer to 10 logits.

    Spatial sizes for a 28x28 input: conv(5) -> 24x24, pool(2) -> 12x12,
    conv(5) -> 8x8, pool(2) -> 4x4, so the flattened size is 20 * 4 * 4 = 320.
    """

    def __init__(self):
        super(ConvModel, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        self.maxpooling = torch.nn.MaxPool2d(kernel_size=2)
        self.linear = torch.nn.Linear(320, 10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        """Return (batch, 10) class logits for a batch of 1x28x28 images."""
        n_samples = x.size(0)
        features = self.relu(self.maxpooling(self.conv1(x)))
        features = self.relu(self.maxpooling(self.conv2(features)))
        # Flatten per sample before the classifier head.
        flat = features.view(n_samples, -1)
        return self.linear(flat)
model=ConvModel()              # instantiate the model

criterion=torch.nn.CrossEntropyLoss()    # loss (expects raw logits) and optimizer
optim=torch.optim.SGD(model.parameters(),lr=1e-2,momentum=0.5)

if __name__ == '__main__':
    for epoch in range(10):
        # `running_loss` instead of `sum`: avoids shadowing the builtin.
        running_loss = 0.0
        for i, data in enumerate(trainLoader, 0):
            inputs, labels = data
            pred = model(inputs)
            loss = criterion(pred, labels)
            running_loss += loss.item()

            optim.zero_grad()
            loss.backward()
            optim.step()
        # Report once per epoch, averaged over the actual number of batches.
        # (The original printed on every iteration and always divided the
        # running total by 64, regardless of how many batches had been seen.)
        print(f"epoch={epoch},loss={running_loss / len(trainLoader)}")


你可能感兴趣的:(python,深度学习,numpy)