import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

# The custom DiabetesDataset class (defined below) handles the numpy->tensor conversion and casts the data to float
# train_data_BP has shape (10000, 8); train_label_BP has shape (10000, 1)
train_dataset = DiabetesDataset(train_data_BP, train_label_BP)
# Wrap the dataset in a DataLoader: batches of 64 samples, shuffled, loaded by 4 parallel workers
train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True, num_workers=4)  # num_workers: data-loading worker processes
The DiabetesDataset class

class DiabetesDataset(Dataset):
    def __init__(self, data, label):
        self.len = data.shape[0]  # shape is (rows, columns); rows = number of samples
        self.x_data = torch.from_numpy(data)
        self.x_data = self.x_data.float()
        self.y_data = torch.from_numpy(label)
        self.y_data = self.y_data.float()

    # Fetch a single sample (features, label) by index
    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    # Return the number of samples in the dataset
    def __len__(self):
        return self.len
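As an optional sanity check (not part of the original pipeline), the dataset and loader can be exercised with random arrays of the shapes quoted above; dummy_data, dummy_label and the loader settings below are purely illustrative.

# Hypothetical stand-ins for train_data_BP / train_label_BP
dummy_data = np.random.rand(10000, 8)     # (samples, features)
dummy_label = np.random.rand(10000, 1)    # (samples, 1)
dummy_dataset = DiabetesDataset(dummy_data, dummy_label)
dummy_loader = DataLoader(dummy_dataset, batch_size=64, shuffle=True, num_workers=0)  # num_workers=0 keeps the check single-process
xb, yb = next(iter(dummy_loader))
print(xb.shape, yb.shape)  # torch.Size([64, 8]) torch.Size([64, 1])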
Building the BP neural network by subclassing torch.nn.Module

class Model(torch.nn.Module):
    def __init__(self):
        # The constructor only declares the layers; they are applied later in forward()
        super(Model, self).__init__()  # call the parent-class initializer
        self.linear1 = torch.nn.Linear(8, 12)
        self.linear2 = torch.nn.Linear(12, 20)
        self.linear3 = torch.nn.Linear(20, 15)
        self.linear4 = torch.nn.Linear(15, 10)
        self.linear5 = torch.nn.Linear(10, 4)
        self.linear6 = torch.nn.Linear(4, 1)
        self.RELU = torch.nn.ReLU()

    # Forward pass
    def forward(self, x):
        x = self.RELU(self.linear1(x))
        x = self.RELU(self.linear2(x))
        x = self.RELU(self.linear3(x))
        x = self.RELU(self.linear4(x))
        x = self.RELU(self.linear5(x))
        x = self.linear6(x)  # no activation on the output layer (regression output)
        # Return the prediction
        return x
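A quick way to confirm the layer sizes chain together (8 -> 12 -> 20 -> 15 -> 10 -> 4 -> 1) is to push a dummy batch through a fresh CPU instance; model_check and dummy_batch are illustrative names, not part of the original code.

model_check = Model()                  # separate CPU instance, independent of the trained bp below
dummy_batch = torch.randn(64, 8)       # hypothetical batch: 64 samples, 8 features
print(model_check(dummy_batch).shape)  # torch.Size([64, 1])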
# Build the BP network
bp = Model().cuda()  # move the model onto the GPU
# Mean-squared-error loss
loss = torch.nn.MSELoss()
# Adam optimizer; lr is the learning rate
optimizer = torch.optim.Adam(bp.parameters(), lr=lr)
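lr here, together with epochs and is_save used in the training loop further down, is not defined in this excerpt and presumably comes from an earlier configuration block. A minimal sketch of what that block might look like, with assumed values rather than the author's actual settings:

lr = 1e-3        # assumed learning rate for Adam
epochs = 100     # assumed number of training epochs
is_save = True   # assumed switch: write the model to disk whenever the loss improves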
Wrapping the training procedure in a function

def train(epoch, train_loader, model, criterion, optimizer):
    train_loss = 0.0
    count = 0
    for i, data in enumerate(train_loader, 0):
        # Unpack the inputs x and the corresponding labels y from the batch
        inputs, labels = data
        # Move the batch onto the GPU
        inputs = inputs.cuda()
        labels = labels.cuda()
        # Forward pass: compute the predicted y
        y_pred = model(inputs)
        # Compute the loss (already on the GPU, since the model and inputs are)
        loss = criterion(y_pred, labels)
        # Zero the gradients
        optimizer.zero_grad()
        # Backpropagation
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Accumulate the total loss
        train_loss += loss.item()
        # Number of batches processed so far (enumerate starts at 0, so add 1)
        count = i + 1
    if epoch % 2 == 0:
        # Print the average loss per batch
        print("train loss:", train_loss / count, end=',')
    return model, train_loss / count
Run the training and save the best model

last_loss = 1000  # sentinel "best loss so far"; any real loss below this triggers a save
for epoch in range(epochs):
    print("epoch:", epoch)
    trained_bp, current_loss = train(epoch, train_loader, bp, loss, optimizer)
    if is_save:
        # Save the whole model whenever this epoch's average loss beats the best seen so far
        if current_loss < last_loss:
            torch.save(trained_bp, 'pt2/BP_parameters_8i_6l_jb2_all.pt')
            print("Save model......")
            last_loss = current_loss
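Because torch.save above pickles the entire Model object rather than just a state_dict, reloading it later only needs torch.load with the Model class definition in scope; restored_bp is an illustrative name.

# Reload the saved model (the Model class must be defined/importable for unpickling)
restored_bp = torch.load('pt2/BP_parameters_8i_6l_jb2_all.pt')
restored_bp.eval()  # switch to evaluation mode before inference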
def test(data, model):
    # Tensor operations build a computation graph by default; wrapping inference in
    # torch.no_grad() disables graph construction, which saves memory and time.
    data = torch.from_numpy(data).cuda()
    x_data = data.float()
    with torch.no_grad():
        # Forward pass: compute the predicted y and move it back to the CPU
        y_pred = model(x_data).cpu()
    return y_pred
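A typical call, assuming a hypothetical (N, 8) numpy test array named test_data_BP and the GPU-resident bp model from above:

test_data_BP = np.random.rand(100, 8)  # hypothetical test inputs with the same 8 feature columns
predictions = test(test_data_BP, bp)   # returns a CPU tensor of shape (100, 1)
predictions = predictions.numpy()      # convert back to numpy if needed downstream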