LeNet is one of the earliest convolutional neural networks. In 1998, Yann LeCun first applied LeNet to image classification and achieved great success on handwritten digit recognition. LeNet extracts image features by repeatedly stacking convolution and pooling layers. The LeNet-5 variant used for the MNIST handwritten digit task is defined below:
import paddle
import numpy as np
from paddle.nn import Conv2D, MaxPool2D, Linear
import paddle.nn.functional as f

# Define the LeNet network structure
class LeNet(paddle.nn.Layer):
    def __init__(self, num):  # num: number of class labels
        super(LeNet, self).__init__()
        # First convolution and pooling layer
        self.conv1 = Conv2D(in_channels=1, out_channels=6, kernel_size=5)
        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
        # Second convolution and pooling layer
        self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5)
        self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
        # Third convolution layer
        self.conv3 = Conv2D(in_channels=16, out_channels=120, kernel_size=4)
        # Fully connected layers: the first outputs 64 neurons,
        # the second outputs one neuron per class label
        self.fc1 = Linear(in_features=120, out_features=64)
        self.fc2 = Linear(in_features=64, out_features=num)

    # Forward pass of the convolutional network
    def forward(self, x):
        # Each convolution layer is followed by relu, then a 2x2 pooling
        x = self.conv1(x)
        x = f.relu(x)
        x = self.max_pool1(x)
        x = self.conv2(x)
        x = f.relu(x)
        x = self.max_pool2(x)
        x = self.conv3(x)
        x = f.relu(x)
        # Shape logic: flatten the feature map [B, C, H, W] -> [B, C*H*W]
        x = paddle.reshape(x, [x.shape[0], -1])
        x = self.fc1(x)
        x = f.relu(x)
        x = self.fc2(x)
        return x
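As a quick sanity check on the layer arithmetic (a minimal sketch of my own; the 1x28x28 input is the MNIST default), a dummy forward pass confirms that the input shrinks to a 120-dimensional feature vector before the fully connected layers:

# Shape trace: 28x28 -> conv5 -> 24x24 -> pool -> 12x12
# -> conv5 -> 8x8 -> pool -> 4x4 -> conv4 -> 1x1 with 120 channels
model = LeNet(num=10)
x = paddle.randn([1, 1, 28, 28], dtype='float32')
print(model(x).shape)  # [1, 10]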
# The LeNet class above is assumed to be saved as LeNet.py
from LeNet import LeNet
import os
import random
import paddle
import numpy as np
import paddle.nn.functional as f
from paddle.vision.transforms import ToTensor
from paddle.vision.datasets import MNIST
def train(model, opt, train_loader, valid_loader, epochs):
    # Train on CPU
    paddle.device.set_device('cpu')
    print('Training started...')
    model.train()
    for epoch in range(epochs):
        for batch_id, data in enumerate(train_loader()):
            img = data[0]
            label = data[1]       # ground truth
            logits = model(img)   # predictions
            # Compute the loss
            loss_func = paddle.nn.CrossEntropyLoss(reduction='none')
            loss = loss_func(logits, label)
            aver_loss = paddle.mean(loss)
            if batch_id % 2000 == 0:
                print(f"epoch: {epoch}, batch_id: {batch_id}, loss is: {float(aver_loss.numpy())}")
            aver_loss.backward()  # back-propagate
            opt.step()            # update the parameters
            opt.clear_grad()      # clear the gradients
        model.eval()
        accuracies = []
        losses = []
        for batch_id, data in enumerate(valid_loader()):
            img, label = data[0], data[1]  # label: ground truth
            logits = model(img)            # raw network outputs
            # softmax turns the logits into class probabilities; it preserves
            # the argmax, so the accuracy matches that of the raw logits
            pred = f.softmax(logits)
            loss_func = paddle.nn.CrossEntropyLoss(reduction='none')
            loss = loss_func(logits, label)
            acc = paddle.metric.accuracy(pred, label)
            accuracies.append(acc.numpy())
            losses.append(loss.numpy())
        print(f"[validation] accuracy/loss: {np.mean(accuracies)}/{np.mean(losses)}")  # accuracy and mean loss
        model.train()
    # Save the model parameters
    paddle.save(model.state_dict(), 'mnist.pdparams')
# Create the model
model = LeNet(num=10)
# Number of epochs
epochs = 5
# Momentum optimizer with learning rate 0.001
opt = paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9, parameters=model.parameters())
# Data loaders for training and validation
train_loader = paddle.io.DataLoader(MNIST(mode='train', transform=ToTensor()), batch_size=10, shuffle=True)
valid_loader = paddle.io.DataLoader(MNIST(mode='test', transform=ToTensor()), batch_size=10)
# Start training
train(model, opt, train_loader, valid_loader, epochs)
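Once training finishes, the saved parameters can be reloaded for inference; a minimal sketch (the file name matches the paddle.save call above):

# Restore the trained weights and switch to inference mode
model = LeNet(num=10)
model.set_state_dict(paddle.load('mnist.pdparams'))
model.eval()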
Analysis
Within a single epoch the loss does not change monotonically: in some epochs it keeps decreasing, in some it first decreases and then rises, in others the opposite. But looking at the last line of each epoch, the one marked [validation], the accuracy on the test data grows steadily while the mean loss keeps falling, and the accuracy stays above 97%. That is considerably higher than the 92% reported in paddle计算机视觉(下)---- 图像分类; the code in that post uses the sigmoid activation, while my code uses relu. I ran my script again with sigmoid for comparison.
Comparing the accuracies of the two runs, from the first epoch to the last the relu version is consistently more accurate than the sigmoid version, so when training and testing with the LeNet network, the ReLU activation should be the first choice.
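One convenient way to reproduce this comparison without editing the class body (an illustrative sketch; the LeNetAct name and the act parameter are my own, and the imports from the LeNet definition above are assumed) is to take the activation as a constructor argument:

# Hypothetical parameterized variant: `act` replaces every f.relu
# call in forward(), so the relu and sigmoid runs share one class
class LeNetAct(paddle.nn.Layer):
    def __init__(self, num, act=f.relu):
        super(LeNetAct, self).__init__()
        self.act = act
        self.conv1 = Conv2D(in_channels=1, out_channels=6, kernel_size=5)
        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
        self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5)
        self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
        self.conv3 = Conv2D(in_channels=16, out_channels=120, kernel_size=4)
        self.fc1 = Linear(in_features=120, out_features=64)
        self.fc2 = Linear(in_features=64, out_features=num)

    def forward(self, x):
        x = self.max_pool1(self.act(self.conv1(x)))
        x = self.max_pool2(self.act(self.conv2(x)))
        x = self.act(self.conv3(x))
        x = paddle.reshape(x, [x.shape[0], -1])
        x = self.fc2(self.act(self.fc1(x)))
        return x

# relu_model = LeNetAct(num=10)                    # relu run
# sigmoid_model = LeNetAct(num=10, act=f.sigmoid)  # sigmoid run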
Summary
Overall, whether with the relu or the sigmoid activation, LeNet is very successful on handwritten digit recognition. Next, let's look at how well LeNet performs on the iChallenge-PM eye disease dataset.
import paddle
import numpy as np
from paddle.nn import Conv2D, MaxPool2D, Linear, Dropout
import paddle.nn.functional as f

# Define the LeNet network structure
class LeNet(paddle.nn.Layer):
    def __init__(self, num):
        super(LeNet, self).__init__()
        self.num = num
        # Convolution and pooling layers; each convolution is followed
        # by relu and then a 2x2 pooling layer
        self.conv1 = Conv2D(in_channels=3, out_channels=6, kernel_size=5)
        self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
        self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5)
        self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
        self.conv3 = Conv2D(in_channels=16, out_channels=120, kernel_size=4)
        # For a 3x224x224 input the feature map after conv3 is 120x50x50,
        # so the flattened vector has 120*50*50 = 300000 elements
        self.fc1 = Linear(in_features=300000, out_features=64)
        self.fc2 = Linear(in_features=64, out_features=num)
        self.dropout = Dropout(p=0.5)

    # Forward pass of the network
    def forward(self, x, label=None):
        x = self.conv1(x)
        x = f.relu(x)
        x = self.max_pool1(x)
        x = self.conv2(x)
        x = f.relu(x)
        x = self.max_pool2(x)
        x = self.conv3(x)
        x = f.relu(x)
        x = paddle.reshape(x, [x.shape[0], -1])
        x = self.fc1(x)
        x = self.dropout(x)
        x = f.relu(x)
        x = self.fc2(x)
        if label is not None:
            if self.num == 1:
                # Binary case: sigmoid maps the logit to the probability of
                # the positive class; concat builds [P(class 0), P(class 1)]
                pred = f.sigmoid(x)
                pred = paddle.concat([1.0 - pred, pred], axis=1)
                acc = paddle.metric.accuracy(pred, paddle.cast(label, dtype='int64'))
            else:
                acc = paddle.metric.accuracy(x, paddle.cast(label, dtype='int64'))
            return x, acc
        else:
            return x
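To double-check the in_features=300000 figure, a dummy forward pass works here too (a minimal sketch; the 3x224x224 input matches the transform_img preprocessing below):

# Shape trace: 224 -> conv5 -> 220 -> pool -> 110 -> conv5 -> 106
# -> pool -> 53 -> conv4 -> 50; flattened: 120 * 50 * 50 = 300000
model = LeNet(num=1)
x = paddle.randn([1, 3, 224, 224], dtype='float32')
print(model(x).shape)  # [1, 1]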
import cv2
import random
import numpy as np
import os

# Preprocess an image that has just been read in
def transform_img(img):
    # Resize the image to 224x224
    img = cv2.resize(img, (224, 224))
    # The image is read in [H, W, C] layout;
    # transpose it to [C, H, W]
    img = np.transpose(img, (2, 0, 1)).astype('float32')
    # Rescale the pixel values to the range [-1.0, 1.0]
    img = (img / 255) * 2.0 - 1.0
    return img
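For example (the file name P0001.jpg is hypothetical), the transform turns an arbitrary fundus photo into a fixed-shape, zero-centered array:

# Sanity check on one image (hypothetical path)
img = cv2.imread('PALM-Training400/P0001.jpg')
img = transform_img(img)
print(img.shape, img.min(), img.max())  # (3, 224, 224), values within [-1.0, 1.0]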
# Define the training set data loader
def data_loader(data_dir, batch_size=10, mode='train'):
    # List the files under data_dir; every file will be read
    filenames = os.listdir(data_dir)
    def reader():
        if mode == 'train':
            # Shuffle the data order at training time
            random.shuffle(filenames)
        batch_imgs, batch_labels = [], []
        for name in filenames:
            filepath = os.path.join(data_dir, name)
            img = cv2.imread(filepath)
            img = transform_img(img)
            if name[0] == 'H' or name[0] == 'N':
                # File names starting with H are high myopia, those starting
                # with N are normal vision; neither is pathologic, so both
                # are negative samples with label 0
                label = 0
            elif name[0] == 'P':
                # File names starting with P are pathologic myopia:
                # positive samples, label 1
                label = 1
            else:
                raise ValueError('Unexpected file name')
            # Append each sample to the batch lists as it is read
            batch_imgs.append(img)
            batch_labels.append(label)
            if len(batch_imgs) == batch_size:
                # Once the list reaches batch_size, emit the data as one
                # mini-batch from the generator
                imgs_array = np.array(batch_imgs).astype('float32')
                labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)
                yield imgs_array, labels_array
                batch_imgs, batch_labels = [], []
        if len(batch_imgs) > 0:
            # Pack the leftover samples (fewer than batch_size) into a final mini-batch
            imgs_array = np.array(batch_imgs).astype('float32')
            labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)
            yield imgs_array, labels_array
    return reader
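A quick way to verify the loader is to pull a single mini-batch from the generator (a sketch; the directory name is hypothetical):

# Fetch one mini-batch and check its shapes
loader = data_loader('PALM-Training400', batch_size=10, mode='train')
imgs, labels = next(loader())
print(imgs.shape, labels.shape)  # (10, 3, 224, 224) (10, 1)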
def valid_data_loader(data_dir, csvfile, batch_size=10, mode='valid'):
    # The training loader infers each label from the file name; the
    # validation loader instead reads each image's label from csvfile.
    # Inspect the unpacked validation labels to see what csvfile contains.
    # Its format is as follows: each row is one sample, where the first
    # column is the image id, the second is the file name, and the third
    # is the label; the fourth and fifth columns are the Fovea coordinates,
    # which are irrelevant to the classification task.
    # ID,imgName,Label,Fovea_X,Fovea_Y
    # 1,V0001.jpg,0,1157.74,1019.87
    # 2,V0002.jpg,1,1285.82,1080.47
    # Open the csvfile holding the validation labels and read its contents
    filelists = open(csvfile, 'r', encoding='utf-8').readlines()
    def reader():
        batch_imgs, batch_labels = [], []
        for line in filelists[1:]:
            # Split first, then index the fields; indexing the raw string
            # would pick out single characters instead of columns
            fields = line.strip().split(',')
            name, label = fields[1], int(fields[2])
            # Load the image by file name and preprocess it
            filepath = os.path.join(data_dir, name)
            img = cv2.imread(filepath)
            img = transform_img(img)
            # Append each sample to the batch lists as it is read
            batch_imgs.append(img)
            batch_labels.append(label)
            if len(batch_imgs) == batch_size:
                # Once the list reaches batch_size, emit the data as one
                # mini-batch from the generator
                imgs_array = np.array(batch_imgs).astype('float32')
                labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)
                yield imgs_array, labels_array
                batch_imgs, batch_labels = [], []
        if len(batch_imgs):
            # Pack the leftover samples (fewer than batch_size) into a final mini-batch
            imgs_array = np.array(batch_imgs).astype('float32')
            labels_array = np.array(batch_labels).astype('float32').reshape(-1, 1)
            yield imgs_array, labels_array
    return reader
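The split-then-index parsing can be checked against a sample row from the format comment above (a standalone sketch):

line = '1,V0001.jpg,0,1157.74,1019.87'
fields = line.strip().split(',')
name, label = fields[1], int(fields[2])
print(name, label)  # V0001.jpg 0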
Importing the data
Data_dir1 = 'D:/杂文/桌面/人工智能/dataset/training/PALM-Training400/PALM-Training400/PALM-Training400'
Data_dir2 = 'D:/杂文/桌面/人工智能/dataset/validation/PALM-Validation400'
Csvfile = 'D:/杂文/桌面/人工智能/dataset/valid_gt/PALM-Validation-GT/PM_Label_and_Fovea_Location.csv'
# Number of epochs
epochs = 5
Training function
def train_pm(model, optimizer):
    paddle.device.set_device('cpu')
    print('Training started...')
    model.train()
    # Define the training and validation data loaders
    train_loader = data_loader(Data_dir1, batch_size=10, mode='train')
    valid_loader = valid_data_loader(Data_dir2, Csvfile)
    for epoch in range(epochs):
        for batch_id, data in enumerate(train_loader()):
            x_data, y_data = data
            img = paddle.to_tensor(x_data)
            label = paddle.to_tensor(y_data)
            # Run the forward pass to get the predictions
            logits = model(img)
            loss = f.binary_cross_entropy_with_logits(logits, label)
            aver_loss = paddle.mean(loss)
            if batch_id % 20 == 0:
                print(f"epoch: {epoch}, batch_id: {batch_id}, loss is: {float(aver_loss.numpy())}")
            # Back-propagate, update the weights, clear the gradients
            aver_loss.backward()
            optimizer.step()
            optimizer.clear_grad()
        model.eval()
        accuracies, losses = [], []
        for batch_id, data in enumerate(valid_loader()):
            x_data, y_data = data
            img = paddle.to_tensor(x_data)
            label = paddle.to_tensor(y_data)
            # Run the forward pass to get the predictions
            logits = model(img)
            # Binary classification: sigmoid maps the logit to the probability
            # of the positive class, thresholded at 0.5; this matches the
            # sigmoid implied by binary_cross_entropy_with_logits
            pred = f.sigmoid(logits)
            loss = f.binary_cross_entropy_with_logits(logits, label)
            # Probability of the negative class
            pred2 = pred * (-1.0) + 1.0
            # Concatenate the two class probabilities along axis 1
            pred = paddle.concat([pred2, pred], axis=1)
            acc = paddle.metric.accuracy(pred, paddle.cast(label, dtype='int64'))
            accuracies.append(acc.numpy())
            losses.append(loss.numpy())
        print(f"[validation] accuracy/loss: {np.mean(accuracies)}/{np.mean(losses)}")
        model.train()
    paddle.save(model.state_dict(), 'palm.pdparams')
    paddle.save(optimizer.state_dict(), 'palm.pdopt')
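Since both the model and optimizer states are saved, training can later be resumed from the checkpoint; a minimal sketch (the file names match the paddle.save calls above):

# Restore the model weights and optimizer state to continue training
model.set_state_dict(paddle.load('palm.pdparams'))
opt.set_state_dict(paddle.load('palm.pdopt'))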
Evaluation function
def evaluation(model, params_file_path):
    paddle.device.set_device('cpu')
    print('Evaluation started...')
    # Load the model parameters
    model_state_dict = paddle.load(params_file_path)
    model.load_dict(model_state_dict)
    model.eval()
    # Note: reads from the training directory; only mode='train' shuffles
    eval_loader = data_loader(Data_dir1, batch_size=10, mode='eval')
    acc_set = []
    aver_loss_set = []
    for batch_id, data in enumerate(eval_loader()):
        x_data, y_data = data
        img = paddle.to_tensor(x_data)
        label = paddle.to_tensor(y_data)
        y_data = y_data.astype(np.int64)
        label2 = paddle.to_tensor(y_data)
        # Compute the predictions and the accuracy
        prediction, acc = model(img, label2)
        # Compute the loss
        loss = f.binary_cross_entropy_with_logits(prediction, label)
        aver_loss = paddle.mean(loss)
        acc_set.append(float(acc.numpy()))
        aver_loss_set.append(float(aver_loss.numpy()))
    # Average accuracy and loss over all batches
    acc_val_mean = np.array(acc_set).mean()
    aver_loss_val_mean = np.array(aver_loss_set).mean()
    print(f"loss={aver_loss_val_mean}, acc={acc_val_mean}")
Main
if __name__ == '__main__':
    # Create the model
    model = LeNet(num=1)
    # Start training: Momentum optimizer with learning rate 0.01
    opt = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9, parameters=model.parameters())
    train_pm(model, optimizer=opt)
    evaluation(model, params_file_path="palm.pdparams")
Reference: paddle计算机视觉(下)---- 图像分类