import torch
in_channels, out_channels = 5, 10
width, height = 100, 100  # spatial width and height of the input
kernel_size = 3  # size of the convolution kernel
batch_size = 1
x = torch.randn(batch_size, in_channels, width, height)  # NCHW layout
print(x)
conv_layer = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size)
output = conv_layer(x)
print(x.shape)       # torch.Size([1, 5, 100, 100])
print(output.shape)  # torch.Size([1, 10, 98, 98])
print(conv_layer.weight.shape)  # torch.Size([10, 5, 3, 3]): (out_channels, in_channels, kH, kW)
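# The output spatial size follows (W - K + 2P) / S + 1: with kernel 3, no padding,
# stride 1, that is 100 - 3 + 1 = 98, matching the shapes printed above. A quick
# sketch with illustrative padding/stride values (not part of the original example):
demo_in = torch.randn(1, 5, 100, 100)
same_conv = torch.nn.Conv2d(5, 10, kernel_size=3, padding=1)  # (100 - 3 + 2)/1 + 1 = 100
print(same_conv(demo_in).shape)     # torch.Size([1, 10, 100, 100]): padding 1 preserves size
strided_conv = torch.nn.Conv2d(5, 10, kernel_size=3, padding=1, stride=2)
print(strided_conv(demo_in).shape)  # torch.Size([1, 10, 50, 50]): stride 2 halves it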
import torch.nn.functional as F

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, (5, 5))
        self.conv2 = torch.nn.Conv2d(10, 20, (5, 5))
        self.maxpooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        # record the minibatch size n from the (n, 1, 28, 28) input
        batch_size = x.size(0)
        x = F.relu(self.maxpooling(self.conv1(x)))
        x = F.relu(self.maxpooling(self.conv2(x)))
        # flatten the (n, 20, 4, 4) feature maps to (n, 320);
        # note: each row of the batch matrix is one sample
        x = x.view(batch_size, -1)
        x = self.fc(x)
        return x
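# Why Linear(320, 10): each 28x28 input shrinks 28 -> 24 (conv1, 5x5) -> 12 (pool)
# -> 8 (conv2, 5x5) -> 4 (pool), leaving 20 channels of 4x4 maps, and
# 20 * 4 * 4 = 320. A quick sanity check of the class above (a sketch):
net = Net()
print(net(torch.randn(1, 1, 28, 28)).shape)  # torch.Size([1, 10])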
# Reference: https://blog.csdn.net/weixin_42521185/article/details/123789635
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import torch
import matplotlib.pyplot as plt
transform = transforms.Compose([
    transforms.ToTensor(),  # convert the PIL image to a (C, H, W) float tensor with values in [0, 1]
    transforms.Normalize((0.1307,), (0.3081,))  # mean and std of the MNIST training set, computed in advance
])
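# Normalize maps each pixel value x to (x - 0.1307) / 0.3081, so inputs are roughly
# zero-mean and unit-variance. A sketch of how those constants can be recomputed
# (assumes torchvision's MNIST .data attribute, a (60000, 28, 28) uint8 tensor):
raw = datasets.MNIST(root='../dataset/mnist', train=True, download=True)
pixels = raw.data.float() / 255.0
print(pixels.mean().item(), pixels.std().item())  # ~0.1307, ~0.3081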
batch_size = 64
train_dataset = datasets.MNIST(root='../dataset/mnist',
                               train=True,
                               download=True,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist',
                              train=False,
                              download=False,
                              transform=transform)
test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)
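# A quick shape check on one batch (a sketch): images arrive as (64, 1, 28, 28)
# NCHW tensors and labels as a (64,) vector of class indices.
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])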
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, (5, 5))
        self.conv2 = torch.nn.Conv2d(10, 20, (5, 5))
        self.maxpooling = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        # record the minibatch size n from the (n, 1, 28, 28) input
        batch_size = x.size(0)
        x = F.relu(self.maxpooling(self.conv1(x)))
        x = F.relu(self.maxpooling(self.conv2(x)))
        # flatten the (n, 20, 4, 4) feature maps to (n, 320);
        # note: each row of the batch matrix is one sample
        x = x.view(batch_size, -1)
        x = self.fc(x)
        return x
model = Net()
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),lr=0.01,momentum=0.5)
# use GPU acceleration if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
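# CrossEntropyLoss combines log-softmax and NLLLoss internally, which is why
# forward() returns raw logits with no softmax. A tiny check with made-up logits:
demo_logits = torch.tensor([[2.0, 0.5], [0.1, 1.0]])
demo_labels = torch.tensor([0, 1])
print(torch.nn.CrossEntropyLoss()(demo_logits, demo_labels).item())
print(torch.nn.NLLLoss()(torch.log_softmax(demo_logits, dim=1), demo_labels).item())  # same value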
def train(epoch):
    running_loss = 0.0
    # batch_idx runs from 0 to 937 (938 batches: 60000 samples at batch size 64,
    # with a smaller final batch), so each input is a (64, 1, 28, 28) tensor
    for batch_idx, data in enumerate(train_loader, 0):
        x, y = data
        x, y = x.to(device), y.to(device)  # move the batch to the GPU
        optimizer.zero_grad()
        y_pred = model(x)
        loss = criterion(y_pred, y)  # compute the cross-entropy loss
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print("[%d,%5d] loss:%.3f" % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
def test():
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data in test_loader:
            x, y = data
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            _, predicted = torch.max(y_pred.data, dim=1)  # index of the max logit per row
            total += y.size(0)
            correct += (predicted == y).sum().item()
    print('accuracy on test set: %d%% [%d/%d]' % (100 * correct / total, correct, total))
    accuracy_list.append(100 * correct / total)
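# torch.max(t, dim=1) returns a (values, indices) pair; the indices are the
# predicted class labels. A tiny illustration with made-up logits:
sample_logits = torch.tensor([[0.1, 2.0, -1.0],
                              [1.5, 0.3, 0.2]])
_, sample_pred = torch.max(sample_logits, dim=1)
print(sample_pred)  # tensor([1, 0])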
if __name__ == '__main__':
    accuracy_list = []
    for epoch in range(10):
        train(epoch)
        test()
    plt.plot(np.linspace(1, 10, 10), accuracy_list)
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.show()