Modules used: torch, torch.nn (nn.Module), torch.nn.functional, torchvision, and torchvision.transforms. The dataset is FashionMNIST; the complete script follows.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
train_set = torchvision.datasets.FashionMNIST(
    root='./data/FashionMNIST',
    train=True,
    download=False,  # assumes the data is already under ./data/FashionMNIST; use download=True on the first run
    transform=transforms.Compose([
        transforms.ToTensor()
    ])
)

class nopaNet(nn.Module):
    def __init__(self):
        super(nopaNet, self).__init__()
        # input images: [batch, 1, 28, 28]
        # out_channels is the number of filters
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
        self.dense1 = nn.Linear(in_features=12*4*4, out_features=120)
        self.dense2 = nn.Linear(in_features=120, out_features=60)
        self.dense3 = nn.Linear(in_features=60, out_features=10)

    def forward(self, data):
        conv1_out = self.conv1(data)
        conv1_out = F.relu(conv1_out)
        conv1_out = F.max_pool2d(conv1_out, kernel_size=2, stride=2)
        conv2_out = self.conv2(conv1_out)
        conv2_out = F.relu(conv2_out)
        conv2_out = F.max_pool2d(conv2_out, kernel_size=2, stride=2)
        # flatten() would collapse this directly into 1*1920
        # conv2_out = conv2_out.flatten()
        # with a batch of 10 the shape should be [10, 192], so plain flatten() must not be used here
        conv2_out = conv2_out.reshape(-1, 12*4*4)
        dense1_out = self.dense1(conv2_out)
        dense1_out = F.relu(dense1_out)
        dense2_out = self.dense2(dense1_out)
        dense2_out = F.relu(dense2_out)
        final_out = self.dense3(dense2_out)
        return final_out
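
# Shape sanity check (an illustrative sketch; _check_shapes is a helper made up for this
# note and is not called anywhere below). A 28x28 input shrinks as
# 28 -> 24 (conv1, k=5) -> 12 (pool) -> 8 (conv2, k=5) -> 4 (pool),
# which is where dense1's 12*4*4 = 192 input features come from.
def _check_shapes():
    net = nopaNet()
    features = torch.randn(10, 12, 4, 4)        # shape after conv2 + pooling for a batch of 10
    print(features.flatten().shape)             # torch.Size([1920]) -- batch dimension is lost
    print(features.flatten(start_dim=1).shape)  # torch.Size([10, 192]) -- same as reshape(-1, 12*4*4)
    dummy = torch.randn(10, 1, 28, 28)          # fake batch of 10 single-channel 28x28 images
    print(net(dummy).shape)                     # torch.Size([10, 10]) -- one score per class
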
def get_num_correct(preds, labels):
    return preds.argmax(dim=1).eq(labels).sum().item()
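
# How get_num_correct behaves on a tiny made-up example (illustrative values only):
# preds  = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # argmax(dim=1) -> tensor([1, 0])
# labels = torch.tensor([1, 1])                    # .eq(labels)   -> tensor([True, False])
# get_num_correct(preds, labels)                   # .sum().item() -> 1
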
if __name__ == "__main__":
    net = nopaNet()
    # prediction with a batch of 1
    # # type(sample) = tuple
    # sample = next(iter(train_set))
    # image, label = sample
    # # shape = [1, 28, 28]
    # # print(image.shape)
    # # turn the single image into a batch tensor (add a batch dimension)
    # image = image.unsqueeze(0)
    # # shape = [1, 1, 28, 28]
    # # print(image.shape)
    # pre = net(image)
    # print(pre)
    # # shape: [1, 10]
    # print(pre.shape)
    # # # first way to read the prediction
    # # print('pre_label:', pre.argmax())
    # # print('true_label:', label)
    # # second way: normalize the scores with softmax
    # print(F.softmax(pre, dim=1))
    # print('pre_label:', pre.argmax())
    # print('true_label:', label)
    # prediction with a batch of 10
    train_dataloader = torch.utils.data.DataLoader(
        train_set,
        batch_size=10
    )
    torch.set_grad_enabled(False)  # inference only, no gradients needed
    batch = next(iter(train_dataloader))
    # # batch is a list holding the images and the labels
    # print(batch)
    images, labels = batch
    # # [10, 1, 28, 28]
    # print(images.shape)
    # # [10]
    # print(labels.shape)
    pred_labels = net(images)
    # # [10, 10]
    # print(pred_labels.shape)
    print('pred_labels:', pred_labels.argmax(dim=1))
    print('labels:', labels)
    # count how many predictions are correct
    print(pred_labels.argmax(dim=1).eq(labels))
    # print(pred_labels.argmax(dim=1).eq(labels).sum())
    print(get_num_correct(pred_labels, labels))
    print('succ')
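
As a follow-up sketch (assuming the net, train_dataloader, and get_num_correct defined above; evaluate_accuracy is a name made up here, not part of the script), the correct-prediction count can be turned into an accuracy over the whole DataLoader:

def evaluate_accuracy(net, loader):
    total_correct = 0
    total_samples = 0
    with torch.no_grad():  # inference only, no gradients needed
        for images, labels in loader:
            preds = net(images)
            total_correct += get_num_correct(preds, labels)
            total_samples += labels.shape[0]
    return total_correct / total_samples  # fraction of samples classified correctly

# e.g. print('accuracy:', evaluate_accuracy(net, train_dataloader))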