pytorch计算模型准确率过程

# Lab 5 Logistic Regression Classifier
import torch
from torch.autograd import Variable
import numpy as np
from sklearn.preprocessing import MinMaxScaler

torch.manual_seed(777)  # fixed seed so weight init / training are reproducible

# Each CSV row: 8 feature columns followed by a single 0/1 diabetes label.
xy = np.loadtxt('./data-03-diabetes.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]  # features: every column except the last
y_data = xy[:, -1:]   # label kept 2-D (N, 1) so it matches the model output
# Scale each feature into [0, 1]; NOTE: the scaler promotes the array to float64.
x_data = MinMaxScaler().fit_transform(x_data)
# Sanity-check shapes before building the model.
print(x_data.shape, y_data.shape)
# Plain float32 tensors. `Variable` has been deprecated since PyTorch 0.4:
# autograd now operates directly on Tensors, so downstream code is unchanged.
X = torch.as_tensor(x_data, dtype=torch.float32)
Y = torch.as_tensor(y_data, dtype=torch.float32)


# Model: two stacked affine layers, then a sigmoid squashing the single
# output logit into (0, 1) for binary classification.
linear1 = torch.nn.Linear(8, 6)   # 8 input features -> 6 hidden units
linear = torch.nn.Linear(6, 1)    # 6 hidden units -> 1 output logit
sigmoid = torch.nn.Sigmoid()
model = torch.nn.Sequential(linear1, linear, sigmoid)

# Binary cross-entropy loss over the sigmoid probabilities.
loss_cost = torch.nn.BCELoss()
# Plain stochastic gradient descent (swap in Adam etc. if desired).
optimizer = torch.optim.SGD(model.parameters(), lr=0.05)

# ---- Training loop -------------------------------------------------------
for step in range(10001):
    optimizer.zero_grad()             # clear gradients accumulated last step
    hypothesis = model(X)             # forward pass: predicted P(y=1 | x)
    cost = loss_cost(hypothesis, Y)   # binary cross-entropy against labels
    cost.backward()                   # back-propagate gradients
    optimizer.step()                  # SGD weight update
    # Report the loss every 200 iterations. `.item()` extracts the Python
    # float; the old `cost.data.numpy()` form is deprecated and bypasses
    # autograd's safety checks.
    if step % 200 == 0:
        print(step, cost.item())

# ---- Evaluation ----------------------------------------------------------
# Threshold the sigmoid output at 0.5: e.g. 0.89 > 0.5 -> 1, 0.14 <= 0.5 -> 0.
with torch.no_grad():  # no gradient tracking needed for evaluation
    # Fresh forward pass so the printed hypothesis matches `predicted`
    # (the original printed the stale tensor from the last training step).
    hypothesis = model(X)
    predicted = (hypothesis > 0.5).float()
    # Accuracy = fraction of thresholded predictions equal to the labels.
    accuracy = (predicted == Y).float().mean()
print("\nHypothesis: ", hypothesis.numpy(), "\nCorrect (Y): ", predicted.numpy(), "\nAccuracy: ", accuracy.item())

你可能感兴趣的:(numpy,python,深度学习)