The two dataset files here need to be downloaded yourself; they are available in my uploaded resources. Just change the paths to your own.
import numpy as np
import h5py
from torch.utils.tensorboard import SummaryWriter
def load_dataset():
    train_dataset = h5py.File(r'E:\文档\实验3:基于Pytorch实现LeNet5\datasets/train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels
    test_dataset = h5py.File(r'E:\文档\实验3:基于Pytorch实现LeNet5\datasets/test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels
    classes = np.array(test_dataset["list_classes"][:])  # the list of classes
    # reshape the labels to shape (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
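A quick sanity check of the shapes this returns (a sketch, assuming the standard SIGNS h5 files from the deeplearning.ai course, which this layout matches):

# Sketch: expected shapes for the standard SIGNS dataset (an assumption about the files)
X_tr, Y_tr, X_te, Y_te, cls = load_dataset()
print(X_tr.shape)  # (1080, 64, 64, 3) -- 1080 RGB images of size 64x64
print(Y_tr.shape)  # (1, 1080)
print(cls)         # [0 1 2 3 4 5] -- 6 classes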
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
# Load the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize the pixel values to [0, 1]
X_train = X_train_orig / 255.
X_test = X_test_orig / 255.
# Training set: type conversion + reshape
X_train = torch.from_numpy(X_train).float()
Y_train = torch.from_numpy(Y_train_orig.T).long()  # CrossEntropyLoss expects integer class labels
X_train = X_train.permute(0, 3, 1, 2)  # NHWC -> NCHW
Y_train = Y_train.reshape(Y_train.size(0),)
# Wrap the data in a DataLoader
trainset = TensorDataset(X_train, Y_train)
trainloader = DataLoader(trainset, batch_size=32)
print(X_train.size())
print(Y_train.size())
# Test set: type conversion + reshape
X_test = torch.from_numpy(X_test).float()
Y_test = torch.from_numpy(Y_test_orig.T).long()
X_test = X_test.permute(0, 3, 1, 2)  # NHWC -> NCHW
Y_test = Y_test.reshape(Y_test.size(0),)
print(X_test.size())
print(Y_test.size())
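To confirm what the permute above does, a throwaway check (a sketch, not part of the training script):

# Sketch: permute(0, 3, 1, 2) moves the channel dimension in front
dummy = torch.zeros(2, 64, 64, 3)          # NHWC, as stored in the h5 files
print(dummy.permute(0, 3, 1, 2).shape)     # torch.Size([2, 3, 64, 64]) -> NCHW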
class Lenet5(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.conv1 = nn.Sequential(  # Layer 1, input: (3, 64, 64) -> output: (8, 16, 16)
            nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=4, stride=4, padding=0)
        )
        self.conv2 = nn.Sequential(  # Layer 2, input: (8, 16, 16) -> output: (16, 4, 4)
            nn.Conv2d(in_channels=8, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=4, stride=4, padding=0)
        )
        self.fc = nn.Linear(16 * 4 * 4, 6)  # 6 output classes
        self.softmax = nn.Softmax(dim=1)
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        # Flatten
        x = x.reshape(x.shape[0], -1)
        x = self.fc(x)
        return x
    def predict(self, x):
        output = self.forward(x)
        pred = self.softmax(output)
        return torch.max(pred, dim=1)[1]
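To see why the fully connected layer takes 16 * 4 * 4 inputs, trace a dummy batch through the two blocks (a quick sketch; the shapes follow from the kernel/stride/padding settings above):

# Sketch: feature-map shapes through the network
m = Lenet5()
x = torch.zeros(1, 3, 64, 64)
h1 = m.conv1(x)   # 3x3 conv (padding 1) keeps 64x64, then 4x4 pool -> torch.Size([1, 8, 16, 16])
h2 = m.conv2(h1)  # 5x5 conv (padding 2) keeps 16x16, then 4x4 pool -> torch.Size([1, 16, 4, 4])
print(h1.shape, h2.shape)  # flattened size: 16 * 4 * 4 = 256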
cnn = Lenet5()
loss_fn = nn.CrossEntropyLoss()
# Use the Adam optimizer
optimizer = torch.optim.Adam(cnn.parameters(), lr=0.009)
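Note that forward() returns raw logits and softmax is applied only inside predict(): nn.CrossEntropyLoss already combines LogSoftmax and NLLLoss internally, so feeding it softmaxed outputs would normalize twice. A minimal check (the logit values here are made up for illustration):

logits = torch.tensor([[2.0, 0.5, 0.1, 0.0, -0.3, 1.0]])  # one sample, 6 classes (made-up values)
target = torch.tensor([0])
print(loss_fn(logits, target))  # CrossEntropyLoss applies log-softmax itself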
def model(train_x, train_y, lr=0.009, epochs=100, batch_size=64):
    # Wrap the training data in a DataLoader
    train_loader = DataLoader(TensorDataset(train_x, train_y), batch_size=batch_size)
    for e in range(epochs):
        epoch_cost = 0
        for step, (batch_x, batch_y) in enumerate(train_loader):
            # Forward pass
            y_pred = cnn(batch_x)
            # Loss
            loss = loss_fn(y_pred, batch_y)
            epoch_cost += loss.item()
            # Zero the gradients
            optimizer.zero_grad()
            # Backward pass
            loss.backward()
            # Update the parameters
            optimizer.step()
        epoch_cost /= step + 1
        if (e + 1) % 10 == 0:
            # Log the average loss and evaluate training accuracy
            writer.add_scalar(tag=f'CNN-lr={lr},epochs={epochs}', scalar_value=epoch_cost, global_step=e)
            with torch.no_grad():
                y_pred = cnn.predict(train_x)
            print(f'epoch={e + 1},loss={epoch_cost:.4f}', f'Train Accuracy: {torch.sum(y_pred == train_y) / y_pred.shape[0] * 100:.2f}%')
# Set up a SummaryWriter for TensorBoard visualization
writer = SummaryWriter('logs')
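With the events written to the logs directory, the loss curves recorded by add_scalar can be viewed by launching TensorBoard and opening the printed URL:

tensorboard --logdir=logs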
# Test the accuracy
# Train the network
model(X_train, Y_train)
# Predict on the training set
with torch.no_grad():
    train_pred = cnn.predict(X_train)
# Compute the accuracy
print(f'train Acc: {torch.sum(train_pred == Y_train) / train_pred.shape[0] * 100:.2f}%')
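The test-set accuracy can be measured the same way, reusing the X_test/Y_test tensors prepared above (a minimal sketch):

with torch.no_grad():
    test_pred = cnn.predict(X_test)
print(f'test Acc: {torch.sum(test_pred == Y_test) / test_pred.shape[0] * 100:.2f}%')
writer.close()  # flush and close the TensorBoard event files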