import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        # load the whole csv into memory as float32
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]
        self.xdata = torch.from_numpy(xy[:, :-1])   # all columns except the last: features
        self.ydata = torch.from_numpy(xy[:, [-1]])  # last column, kept 2-D: labels

    def __getitem__(self, index):
        return self.xdata[index], self.ydata[index]

    def __len__(self):
        return self.len

dataset = DiabetesDataset('diabetes.csv.gz')
train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True, num_workers=0)
The schematic diagram used batch_size=2, so every two samples formed a mini-batch. In the program we set batch_size=32, so every 32 samples form one mini-batch; the last mini-batch simply holds whatever samples remain, so its size depends on the total number of samples.
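As a quick check (a minimal sketch, not part of the original program; it reuses the dataset object defined above with a non-shuffled loader), iterating a DataLoader shows that every mini-batch holds 32 samples except possibly the last one, and that the number of mini-batches is len(dataset) divided by 32, rounded up:

from torch.utils.data import DataLoader

# Sketch: inspect how the DataLoader partitions the dataset into mini-batches.
check_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=False)
print('number of mini-batches:', len(check_loader))   # ceil(len(dataset) / 32)
for i, (inputs, labels) in enumerate(check_loader):
    # inputs is [32, 8] and labels is [32, 1] for every batch except,
    # possibly, the final one, which holds the leftover samples
    print(i, inputs.shape, labels.shape)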
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]
        self.xdata = torch.from_numpy(xy[:, :-1])   # features: first 8 columns
        self.ydata = torch.from_numpy(xy[:, [-1]])  # label: last column, kept 2-D

    def __getitem__(self, index):
        return self.xdata[index], self.ydata[index]

    def __len__(self):
        return self.len

dataset = DiabetesDataset('diabetes.csv.gz')
train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True, num_workers=0)

# evaluation reuses the full dataset (no separate held-out test split)
xtest = dataset.xdata
ytest = dataset.ydata
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # three fully connected layers: 8 -> 6 -> 4 -> 1
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))  # final sigmoid gives a probability for BCELoss
        return x
model = Model()
criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
costlist = []
acclist = []
# On non-Linux systems (Windows/macOS), the training loop below must be wrapped in an
# `if __name__ == '__main__':` guard whenever num_workers > 0
# (a sketch of this wrapping appears after the output below)
for epoch in range(10000):
    l = 0
    for i, data in enumerate(train_loader, 0):
        # 1. Prepare data
        inputs, labels = data
        # 2. Forward
        ypred = model(inputs)
        loss = criterion(ypred, labels)
        l += loss.item()
        # print(epoch, i, loss.item())
        # 3. Backward
        optimizer.zero_grad()
        loss.backward()
        # 4. Update
        optimizer.step()
    # average mini-batch loss over this epoch
    costlist.append(l / len(train_loader))
    # evaluate accuracy once every 1000 epochs
    if epoch % 1000 == 999:
        with torch.no_grad():
            ypredtest = model(xtest)
            ypredlabel = torch.where(ypredtest > 0.5, torch.tensor([1.0]), torch.tensor([0.0]))
            acc = torch.eq(ypredlabel, ytest).sum().item() / ytest.size(0)
            acclist.append(acc)
            print('the accuracy of testdataset:', acc)
plt.figure(figsize=(10,4))
plt.subplot(1, 2, 1)
plt.plot(range(10000), costlist)
plt.title('error of mini-batch')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.subplot(1, 2, 2)
plt.plot(np.arange(1, 11) * 1000, acclist)
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('the accuracy of test dataset')
plt.show()
the accuracy of testdataset: 0.8234519104084321
the accuracy of testdataset: 0.8313570487483531
the accuracy of testdataset: 0.855072463768116
the accuracy of testdataset: 0.8629776021080369
the accuracy of testdataset: 0.8669301712779973
the accuracy of testdataset: 0.8682476943346509
the accuracy of testdataset: 0.8722002635046113
the accuracy of testdataset: 0.8748353096179183
the accuracy of testdataset: 0.8761528326745718
the accuracy of testdataset: 0.8774703557312253
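As noted in the comment above the training loop: on Windows and macOS, DataLoader worker processes are started with spawn rather than fork, and the child processes re-import the main module, so if num_workers were set above 0 the code that iterates train_loader must not run at import time. A minimal sketch of that wrapping (the train function name is just for illustration; the body mirrors the loop above):

# Sketch: the same training loop, guarded for non-Linux platforms (spawn start method).
def train():
    for epoch in range(10000):
        epoch_loss = 0.0
        for inputs, labels in train_loader:
            ypred = model(inputs)
            loss = criterion(ypred, labels)
            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        costlist.append(epoch_loss / len(train_loader))

if __name__ == '__main__':
    train()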
Due to the performance limits of my machine, and because mini-batch training is more demanding on compute, we only ran 10,000 epochs; more epochs would take correspondingly longer. I expect that once the iteration count passes 100,000, the accuracy would improve further.