Handwritten digit recognition on MNIST with a convolutional neural network (PyTorch source code)

import numpy as np
import csv
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision.datasets as normal_datasets
import torchvision.transforms as transforms
from torch.autograd import Variable

# Load and preprocess the validation data (the official MNIST test split)
test_dataset = normal_datasets.MNIST(root='./mnist/',
                                     train=False,
                                     transform=transforms.ToTensor(),
                                     download=True)   # download the data if it is not cached locally
#print(len(test_dataset))   # 10000 validation samples
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=64,
                                          shuffle=False)

# Read the training data (CSV: label in column 0, 784 pixel values after it)
def loadTrainData():
    l = []
    with open('data/train.csv') as file:
        lines = csv.reader(file)
        for line in lines:
            l.append(line)      # 42001 rows x 785 columns (including the header)
    l.remove(l[0])              # drop the header row
    l = np.array(l)
    label = l[:, 0]
    data = l[:, 1:]
    return data, label

# Read the test data
l = []
with open('data/test.csv') as file:
    lines = csv.reader(file)
    for line in lines:
        l.append(line)          # 28001 rows x 784 columns (including the header)
l.remove(l[0])                  # drop the header row
l = np.array(l)
test_data = l
#print(test_data.shape)
# Read the training data and labels
train_data, train_label = loadTrainData()
train_data = train_data.astype(dtype=np.float64)    # convert the pixel strings to floats
train_label = train_label.astype(dtype=np.int64)
#print(type(train_label))       # numpy array
#print(train_data.shape)        # (42000, 784)
train_data = train_data.reshape((42000, 28, 28))
#print(train_data.shape)        # (42000, 28, 28)
train_data = np.multiply(train_data, 1.0 / 255.0)   # scale the pixels to [0, 1]
train_data = torch.Tensor(train_data)               # convert to a tensor
#print(type(train_data))
train_data = torch.unsqueeze(train_data, dim=1).type(torch.FloatTensor)
#print(train_data.shape)        # (42000, 1, 28, 28)

#train_label = train_label.astype(dtype=np.int64)
#print(type(train_label))       # numpy array
train_label = torch.from_numpy(train_label).long()
#print(type(train_label))
#print(train_label)
train_dataset = Data.TensorDataset(train_data, train_label)
train_loader = Data.DataLoader(dataset=train_dataset, batch_size=20, shuffle=True)   # batch and shuffle
#print(train_label)

# Preprocess the test data
test_data = test_data.astype(dtype=np.float64)
test_data = test_data.reshape((28000, 28, 28))
test_data = np.multiply(test_data, 1.0 / 255.0)
test_data = torch.Tensor(test_data)
test_data = torch.unsqueeze(test_data, dim=1).type(torch.FloatTensor)
print(test_data.shape)          # (28000, 1, 28, 28)
#test_data = test_data[:10]
#print(type(test_data))
test_label = np.random.randint(0, 2, 28000)   # dummy labels, only needed so TensorDataset gets a second tensor
test_label = torch.from_numpy(test_label).long()
test_dataset_1 = Data.TensorDataset(test_data, test_label)
print(test_dataset_1)
test_loader_1 = Data.DataLoader(dataset=test_dataset_1, shuffle=False)
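# Quick sanity check (added here, not part of the original script): pull one batch from
# each DataLoader and confirm the batch shapes produced by the loaders defined above.
xb, yb = next(iter(train_loader))
print(xb.shape, yb.shape)       # expected: torch.Size([20, 1, 28, 28]) torch.Size([20])
vx, vy = next(iter(test_loader))
print(vx.shape, vy.shape)       # expected: torch.Size([64, 1, 28, 28]) torch.Size([64])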

# Define the network
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # Convolutional layers
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3),       # b,16,26,26
            nn.BatchNorm2d(16),
            nn.ReLU())

        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3),      # 32,24,24
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2))             # 32,12,12

        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3),      # 64,10,10
            nn.BatchNorm2d(64),
            nn.ReLU())

        self.layer4 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3),     # 128,8,8
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)) # 128,4,4

        self.fc = nn.Sequential(
            nn.Linear(128 * 4 * 4, 1024),
            nn.ReLU(),
            nn.Linear(1024, 128),
            nn.ReLU(),
            nn.Linear(128, 10))

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = x.view(x.size(0), -1)   # flatten to (batch, 128*4*4)
        x = self.fc(x)
        return x
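# Added sanity check (not part of the original script): pass a dummy batch through the
# convolutional layers to confirm the flattened feature size assumed by the first
# Linear layer, 128*4*4 = 2048.
_m = CNN()
_feat = _m.layer4(_m.layer3(_m.layer2(_m.layer1(torch.zeros(2, 1, 28, 28)))))
print(_feat.shape)              # expected: torch.Size([2, 128, 4, 4])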

cnn = CNN()
if torch.cuda.is_available():
    cnn.cuda()
# Optimizer and loss function
optimizer = torch.optim.Adam(cnn.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()

lossel = []
access = []
for epoch in range(100):
    train_loss = 0
    train_acc = 0
    for step, (x, label) in enumerate(train_loader):
        #print(x.shape)                     # (20, 1, 28, 28)
        if torch.cuda.is_available():
            b_x = Variable(x.cuda())
            label = Variable(label.cuda())
        else:
            b_x = Variable(x)
            label = Variable(label)
        #print(label.shape)                 # (20,)
        out = cnn(b_x)
        #print(out.shape)                   # (20, 10)
        loss = loss_func(out, label)        # compute the loss
        optimizer.zero_grad()               # clear the gradients
        loss.backward()                     # back-propagate
        optimizer.step()                    # update the weights
        train_loss += loss.item()           # accumulate the epoch loss
    lossel.append(train_loss / len(train_loader))

    # Evaluate on the MNIST validation set after each epoch
    test_acc = 0
    num_cor = 0
    for test_x, label in test_loader:
        test_x = torch.Tensor(test_x)
        test_x = test_x.reshape((-1, 1, 28, 28))
        #print(test_x.shape)
        if torch.cuda.is_available():
            test_x = Variable(test_x.cuda())
            label = Variable(label.cuda())

        out = cnn(test_x)
        # compute the accuracy
        _, pred = out.max(1)
        num_correct = (pred == label).sum().item()
        acc = num_correct / test_x.shape[0]
        test_acc += acc
        num_cor += num_correct
        #print(num_correct)

    access.append(test_acc / len(test_loader))
    print('cor_num:{}, Test Acc: {:.6f}'
          .format(num_cor, test_acc / len(test_loader)))
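# The per-epoch training loss (lossel) and validation accuracy (access) collected above
# are never displayed in the original script. A minimal plotting sketch, assuming
# matplotlib is installed (not part of the original code), could look like this:
import matplotlib.pyplot as plt
plt.plot(lossel, label='average training loss')
plt.plot(access, label='validation accuracy')
plt.xlabel('epoch')
plt.legend()
plt.savefig('data/training_curves.png')     # hypothetical output path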

# Predict labels for the Kaggle test data
output = []
for x, label in test_loader_1:
    x = torch.Tensor(x)
    if torch.cuda.is_available():
        x = Variable(x.cuda())
    out = cnn(x)
    test_output = torch.max(out, 1)[1].data.cpu().numpy().squeeze()
    #print(len(test_output))   # len() fails here: with batch size 1, squeeze() yields a 0-d array
    #print(test_output)
    output.append(test_output)

#print(output)
print(len(output))
np.savetxt("data/kaggle_out.txt", output)
label = np.loadtxt("data/kaggle_out.txt")
test_label = []
for i in label:
    test_label.append(i)

with open('data/kaggle_out.csv', 'w', newline='') as myFile:
    myWriter = csv.writer(myFile)
    for i in test_label:
        tmp = []
        tmp.append(i)
        myWriter.writerow(tmp)
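# The CSV written above holds one raw float per row with no header. If the goal is a
# Kaggle Digit Recognizer submission (an assumption suggested by the file names, not
# stated in the original), the expected format is an ImageId,Label header with integer
# labels, e.g.:
with open('data/kaggle_submission.csv', 'w', newline='') as f:   # hypothetical output file
    writer = csv.writer(f)
    writer.writerow(['ImageId', 'Label'])
    for idx, lab in enumerate(test_label, start=1):
        writer.writerow([idx, int(lab)])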
