PyTorch Implementation of Logistic Regression
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Hyper-parameters
input_size = 28 * 28    # each MNIST image is 28x28 pixels, flattened to a 784-dim vector
num_classes = 10        # digits 0-9
num_epochs = 10
batch_size = 100
learning_rate = 0.001
# MNIST dataset (images and labels)
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)

test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor())

# Data loaders (input pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# Logistic regression model: a single linear layer that maps a flattened image to 10 class scores
model = nn.Linear(input_size, num_classes)

# Loss and optimizer
# nn.CrossEntropyLoss applies log-softmax to the logits and computes the negative log-likelihood in one step
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
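Because CrossEntropyLoss already applies log-softmax internally, the model itself stays a plain linear map and outputs raw logits. If you want the actual class probabilities, a minimal sketch is to apply torch.softmax to the logits yourself; the names sample_images, sample_labels, logits, and probs below are illustrative, not part of the original script:

# Minimal sketch: turning logits into class probabilities (illustrative only)
sample_images, sample_labels = next(iter(train_loader))   # one batch of MNIST images
logits = model(sample_images.reshape(-1, input_size))     # shape: (batch_size, num_classes)
probs = torch.softmax(logits, dim=1)                      # each row sums to 1
print(probs.shape, probs[0].sum())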
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten each 28x28 image into a 784-dim row vector
        images = images.reshape(-1, input_size)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss:{:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
Epoch [1/10], Step [100/600], Loss:2.2729
Epoch [1/10], Step [200/600], Loss:2.2154
Epoch [1/10], Step [300/600], Loss:2.0251
Epoch [1/10], Step [400/600], Loss:1.9289
Epoch [1/10], Step [500/600], Loss:1.8836
Epoch [1/10], Step [600/600], Loss:1.8227
Epoch [2/10], Step [100/600], Loss:1.7710
Epoch [2/10], Step [200/600], Loss:1.6861
Epoch [2/10], Step [300/600], Loss:1.7019
Epoch [2/10], Step [400/600], Loss:1.5922
Epoch [2/10], Step [500/600], Loss:1.5029
Epoch [2/10], Step [600/600], Loss:1.5454
Epoch [3/10], Step [100/600], Loss:1.5381
Epoch [3/10], Step [200/600], Loss:1.3151
Epoch [3/10], Step [300/600], Loss:1.4771
Epoch [3/10], Step [400/600], Loss:1.2503
Epoch [3/10], Step [500/600], Loss:1.3210
Epoch [3/10], Step [600/600], Loss:1.3356
Epoch [4/10], Step [100/600], Loss:1.2238
Epoch [4/10], Step [200/600], Loss:1.1496
Epoch [4/10], Step [300/600], Loss:1.2525
Epoch [4/10], Step [400/600], Loss:1.1648
Epoch [4/10], Step [500/600], Loss:1.0497
Epoch [4/10], Step [600/600], Loss:1.1387
Epoch [5/10], Step [100/600], Loss:1.0765
Epoch [5/10], Step [200/600], Loss:1.0479
Epoch [5/10], Step [300/600], Loss:1.0902
Epoch [5/10], Step [400/600], Loss:1.0168
Epoch [5/10], Step [500/600], Loss:0.9326
Epoch [5/10], Step [600/600], Loss:1.0073
Epoch [6/10], Step [100/600], Loss:1.0087
Epoch [6/10], Step [200/600], Loss:0.9378
Epoch [6/10], Step [300/600], Loss:0.9467
Epoch [6/10], Step [400/600], Loss:0.9626
Epoch [6/10], Step [500/600], Loss:1.0194
Epoch [6/10], Step [600/600], Loss:0.8813
Epoch [7/10], Step [100/600], Loss:0.9351
Epoch [7/10], Step [200/600], Loss:0.8565
Epoch [7/10], Step [300/600], Loss:0.8663
Epoch [7/10], Step [400/600], Loss:0.8950
Epoch [7/10], Step [500/600], Loss:0.9888
Epoch [7/10], Step [600/600], Loss:0.7603
Epoch [8/10], Step [100/600], Loss:0.7935
Epoch [8/10], Step [200/600], Loss:0.8608
Epoch [8/10], Step [300/600], Loss:0.9538
Epoch [8/10], Step [400/600], Loss:0.7843
Epoch [8/10], Step [500/600], Loss:0.7662
Epoch [8/10], Step [600/600], Loss:0.8472
Epoch [9/10], Step [100/600], Loss:0.8384
Epoch [9/10], Step [200/600], Loss:0.8758
Epoch [9/10], Step [300/600], Loss:0.7508
Epoch [9/10], Step [400/600], Loss:0.7223
Epoch [9/10], Step [500/600], Loss:0.7314
Epoch [9/10], Step [600/600], Loss:0.6978
Epoch [10/10], Step [100/600], Loss:0.6625
Epoch [10/10], Step [200/600], Loss:0.7967
Epoch [10/10], Step [300/600], Loss:0.7854
Epoch [10/10], Step [400/600], Loss:0.6818
Epoch [10/10], Step [500/600], Loss:0.7990
Epoch [10/10], Step [600/600], Loss:0.5855
# Test the model
# In the test phase, gradients are not needed (saves memory and computation)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, input_size)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)   # index of the largest logit is the predicted class
        total += labels.size(0)
        correct += (predicted == labels).sum()

    print('Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
Accuracy of the model on the 10000 test images: 85.55999755859375 %
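The long fractional value above appears because correct is accumulated as a tensor, so the division yields a tensor that prints with full float precision. A minimal sketch of a cleaner readout (illustrative; the accuracy variable is not in the original script) converts the count to a Python number with .item():

# Minimal sketch: cleaner accuracy printout (illustrative only)
accuracy = 100 * correct.item() / total
print('Accuracy: {:.2f} %'.format(accuracy))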
# Save the model checkpoint (only the learned parameters)
torch.save(model.state_dict(), 'model_param.ckpt')

# load_state_dict() copies the parameters in place and does not return the model,
# so its result should not be assigned back to `model`
model.load_state_dict(torch.load('model_param.ckpt'))
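When the checkpoint is reloaded in a fresh session, the layer has to be rebuilt first, since the state dict stores only the weights and bias. A minimal sketch under the same input_size/num_classes definitions as above; the name restored is illustrative:

# Minimal sketch: restoring the checkpoint in a new session (assumes the same architecture)
restored = nn.Linear(input_size, num_classes)              # rebuild the same single-layer model
restored.load_state_dict(torch.load('model_param.ckpt'))   # copy the saved weights and bias in place
restored.eval()                                            # switch to evaluation mode before inference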