The course demonstrated prediction on a single example image. Randomly draw 100 images from the original MNIST dataset and test the model's classification accuracy.
[Assignment]
✓ Get the code running: complete the code based on what was covered in class and make sure the program runs.
[Grading criteria]
✓ The code runs successfully and produces a result (prints the classification accuracy over 100 images): 100 points
It would be nice if the notebook's code auto-completion were on par with local PyCharm's.
import os
import random
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
import numpy as np
from PIL import Image
import gzip
import json
# First, obtain the training and test datasets
train_data = paddle.dataset.mnist.train()
test_data = paddle.dataset.mnist.test()
# Shuffle both datasets with a buffer size of 100
train_data = fluid.io.shuffle(train_data, 100)
test_data = fluid.io.shuffle(test_data, 100)
# Decorate the readers into batched readers with batch_size = 100
# (so the test step can print the classification accuracy over 100 images)
train_data = fluid.io.batch(train_data, 100)
test_data = fluid.io.batch(test_data, 100)
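As a quick sanity check of the data pipeline, the batched reader can be peeked at before training. This is an illustrative sketch, assuming (as the reshape code later on does) that each batch is a list of 100 (image, label) pairs with the image flattened to 784 floats:

# Illustrative peek at one batch from the decorated reader (assumption: list of (image, label) pairs)
sample_batch = next(test_data())
print('batch size:', len(sample_batch))                    # expected: 100
print('image shape:', np.array(sample_batch[0][0]).shape)  # expected: (784,), reshaped to 1x28x28 later
print('label:', sample_batch[0][1])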
# Define the model structure
class MNIST(fluid.dygraph.Layer):
    def __init__(self):
        super(MNIST, self).__init__()
        # First convolution layer with ReLU activation
        self.conv1 = Conv2D(num_channels=1, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')
        # Max-pooling layer with kernel size 2 and stride 2
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # Second convolution layer with ReLU activation
        self.conv2 = Conv2D(num_channels=20, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')
        # Max-pooling layer with kernel size 2 and stride 2
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # Fully connected layer with 10 output nodes and softmax activation
        self.fc = Linear(input_dim=980, output_dim=10, act='softmax')

    # Forward pass of the network
    def forward(self, inputs, label):
        x = self.conv1(inputs)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        # Flatten the 20x7x7 feature maps into a 980-dimensional vector
        x = fluid.layers.reshape(x, [x.shape[0], 980])
        x = self.fc(x)
        if label is not None:
            acc = fluid.layers.accuracy(input=x, label=label)
            return x, acc
        else:
            return x
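The input_dim=980 of the fully connected layer follows from the conv/pool arithmetic: each 5x5 convolution with padding 2 and stride 1 preserves the 28x28 spatial size, and each 2x2 max-pooling halves it, so the second pooling outputs 20 channels of 7x7 features, i.e. 20 * 7 * 7 = 980. The snippet below is only an illustrative recomputation of that number (the helper functions are not part of the course code):

# Illustrative check of the flattened feature size expected by the Linear layer
def conv_out(size, kernel=5, stride=1, padding=2):
    # standard output-size formula for a convolution
    return (size + 2 * padding - kernel) // stride + 1

def pool_out(size, kernel=2, stride=2):
    return (size - kernel) // stride + 1

h = 28
h = pool_out(conv_out(h))  # 28 -> 28 -> 14
h = pool_out(conv_out(h))  # 14 -> 14 -> 7
print('flatten size:', 20 * h * h)  # 20 channels * 7 * 7 = 980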
# Set use_gpu to True when running on a GPU machine
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()

with fluid.dygraph.guard(place):
    model = MNIST()
    model.train()

    EPOCH_NUM = 5
    BATCH_SIZE = 100
    # Define a polynomially decaying learning rate schedule
    total_steps = (int(60000 // BATCH_SIZE) + 1) * EPOCH_NUM
    lr = fluid.dygraph.PolynomialDecay(0.01, total_steps, 0.001)
    # Use the Adam optimizer
    optimizer = fluid.optimizer.AdamOptimizer(learning_rate=lr, parameter_list=model.parameters())

    for epoch_id in range(EPOCH_NUM):
        for batch_id, data in enumerate(train_data()):
            # Prepare the image data and reshape it into NCHW format
            image_data = np.array([x[0] for x in data]).astype('float32').reshape(-1, 1, 28, 28)
            # Prepare the labels as an int64 column vector
            label_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1)
            image = fluid.dygraph.to_variable(image_data)
            label = fluid.dygraph.to_variable(label_data)

            # Forward pass: get both the model output and the batch accuracy
            predict, acc = model(image, label)
            avg_acc = fluid.layers.mean(acc)
            # Compute the loss, averaged over the batch
            loss = fluid.layers.cross_entropy(predict, label)
            avg_loss = fluid.layers.mean(loss)

            # Print the current loss and accuracy every 200 batches
            if batch_id % 200 == 0:
                print("epoch: {}, batch: {}, loss is: {}, acc is {}".format(epoch_id, batch_id, avg_loss.numpy(), avg_acc.numpy()))

            # Backward pass and parameter update
            avg_loss.backward()
            optimizer.minimize(avg_loss)
            model.clear_gradients()

        # Save the model and optimizer parameters after each epoch
        fluid.save_dygraph(model.state_dict(), './checkpoint/mnist_epoch{}'.format(epoch_id))
        fluid.save_dygraph(optimizer.state_dict(), './checkpoint/mnist_epoch{}'.format(epoch_id))
    fluid.save_dygraph(model.state_dict(), 'mnist')
epoch: 0, batch: 0, loss is: [2.9192822], acc is [0.18]
epoch: 0, batch: 200, loss is: [0.33062753], acc is [0.92]
epoch: 0, batch: 400, loss is: [0.09664985], acc is [0.95]
epoch: 1, batch: 0, loss is: [0.07815135], acc is [0.98]
epoch: 1, batch: 200, loss is: [0.07338614], acc is [0.98]
epoch: 1, batch: 400, loss is: [0.02472404], acc is [1.]
epoch: 2, batch: 0, loss is: [0.04026771], acc is [0.98]
epoch: 2, batch: 200, loss is: [0.07566174], acc is [0.98]
epoch: 2, batch: 400, loss is: [0.01209071], acc is [1.]
epoch: 3, batch: 0, loss is: [0.03427401], acc is [0.99]
epoch: 3, batch: 200, loss is: [0.03426887], acc is [0.98]
epoch: 3, batch: 400, loss is: [0.00716644], acc is [1.]
epoch: 4, batch: 0, loss is: [0.0203767], acc is [0.99]
epoch: 4, batch: 200, loss is: [0.00760919], acc is [1.]
epoch: 4, batch: 400, loss is: [0.00716131], acc is [1.]
with fluid.dygraph.guard():
    print('start testing ......')
    # Load the saved model parameters
    model = MNIST()
    model_state_dict, _ = fluid.load_dygraph('mnist')
    # Use set_dict here, since load_dict is being deprecated
    model.set_dict(model_state_dict)
    model.eval()

    for batch_id, data in enumerate(test_data()):
        image_data = np.array([x[0] for x in data]).astype('float32').reshape(-1, 1, 28, 28)
        label_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1)
        image = fluid.dygraph.to_variable(image_data)
        label = fluid.dygraph.to_variable(label_data)
        prediction, acc = model(image, label)
        avg_acc = fluid.layers.mean(acc)
        print('Average accuracy on 100 randomly drawn test images: {}'.format(avg_acc.numpy()))
        # Only the first (shuffled) batch of 100 images is evaluated
        break
start testing ......
Average accuracy on 100 randomly drawn test images: [0.98]
Repeated runs show that, because the test sample is small and drawn at random, the average accuracy of a single 100-image batch fluctuates noticeably.
To make the result more trustworthy, the model is also evaluated on 100 batches (batch_size = 100), recording the average accuracy of each batch and then computing the overall average.
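To put a rough number on that fluctuation, the single-batch accuracy can be viewed as a binomial estimate with standard error sqrt(p * (1 - p) / n). The calculation below is illustrative only, assuming a true accuracy of about 0.98:

# Rough standard error of the accuracy estimate for different test sizes
# (illustrative; assumes independent samples and a true accuracy of about 0.98)
p = 0.98
for n in (100, 10000):
    se = (p * (1 - p) / n) ** 0.5
    print('n = {:>5}: standard error ~ {:.4f}'.format(n, se))
# roughly 0.014 for 100 images vs 0.0014 for 10000, which is why a single 100-image batch varies noticeably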
import matplotlib.pyplot as plt

with fluid.dygraph.guard():
    print('start testing ......')
    # Load the saved model parameters
    model = MNIST()
    model_state_dict, _ = fluid.load_dygraph('mnist')
    # Use set_dict here, since load_dict is being deprecated
    model.set_dict(model_state_dict)
    model.eval()

    avgs = []
    batch_ids = []
    for batch_id, data in enumerate(test_data()):
        image_data = np.array([x[0] for x in data]).astype('float32').reshape(-1, 1, 28, 28)
        label_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1)
        image = fluid.dygraph.to_variable(image_data)
        label = fluid.dygraph.to_variable(label_data)
        prediction, acc = model(image, label)
        avg_acc = fluid.layers.mean(acc)
        batch_ids.append(batch_id)
        avgs.append(avg_acc.numpy())

    # Plot the per-batch average accuracy
    plt.title("Average accuracy of multiple batches of sample testing")
    plt.plot(batch_ids, avgs)
    plt.xlabel("batch_id")
    plt.ylabel("average accuracy")
    plt.show()

    # Overall average accuracy over the 10000 test samples
    avg_all_acc = np.array(avgs).mean()
print("the average accuracy of the 10000 testing samples is {}".format(avg_all_acc))
start testing ......
the average accuracy of the 10000 testing samples is 0.9869000315666199
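An alternative way to satisfy the original prompt is to draw the 100 test images explicitly with random.sample instead of relying on the first batch of the shuffled reader. The following is a hedged sketch of that variant; it reuses the saved 'mnist' checkpoint and assumes, as the code above does, that paddle.dataset.mnist.test() yields (image, label) pairs with 784-dimensional flattened images:

# Sketch: explicitly sample 100 random test images and measure accuracy
with fluid.dygraph.guard():
    model = MNIST()
    model_state_dict, _ = fluid.load_dygraph('mnist')
    model.set_dict(model_state_dict)
    model.eval()

    samples = random.sample(list(paddle.dataset.mnist.test()), 100)
    image_data = np.array([s[0] for s in samples]).astype('float32').reshape(-1, 1, 28, 28)
    label_data = np.array([s[1] for s in samples]).astype('int64').reshape(-1, 1)
    image = fluid.dygraph.to_variable(image_data)
    label = fluid.dygraph.to_variable(label_data)

    _, acc = model(image, label)
    print('accuracy on 100 randomly sampled test images: {}'.format(acc.numpy()))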