Each convolutional block consists of a convolution (the kernels), a pooling layer, and an activation function.
In general, the last layers are fully connected layers.
def __init__(self):
    super(CNNNET, self).__init__()
    self.con1 = nn.Sequential(
        nn.Conv2d(3, 64, 3, 1, 1),   # 3*128*128 --> 64*128*128
        # 3 input channels, 64 output channels (i.e. 64 kernels), kernel size 3*3 or (3, 3),
        # stride 1, padding = 0.5 * (kernel_size - 1), e.g. kernel 3 --> padding 1,
        # which keeps the spatial size unchanged
        nn.MaxPool2d(2),             # 64*128*128 --> 64*64*64
        # pooling layer
        nn.ReLU(),
    )
    self.con2 = nn.Sequential(
        nn.Conv2d(64, 128, 3, 1, 1), # 64*64*64 --> 128*64*64
        nn.MaxPool2d(2),             # 128*64*64 --> 128*32*32
        # pooling layer
        nn.ReLU(),
    )
    self.fc = nn.Sequential(
        nn.Linear(128*32*32, 64),    # 128*32*32 = 131072 flattened features
        nn.ReLU(),
        nn.Linear(64, 32),
        nn.ReLU(),
        nn.Linear(32, 10)            # final classification into 10 classes
    )
In the forward pass, the input is passed through each layer from top to bottom.
Before the fully connected layers, the convolutional feature maps must be flattened into a single row per sample: out = out.view(out.size(0), -1).
def forward(self, input):
    out = self.con1(input)
    out = self.con2(out)
    out = out.view(out.size(0), -1)
    # flatten the feature maps into a single row per sample
    out = self.fc(out)
    return out
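As a quick sanity check of the shapes noted in the comments (assuming a 128*128 RGB input, which is what those comments are based on), a dummy tensor can be pushed through two blocks like con1 and con2 to confirm that the flattened size really is 128*32*32:
import torch
import torch.nn as nn

# two conv blocks matching con1/con2 above (sketch for shape checking only)
con1 = nn.Sequential(nn.Conv2d(3, 64, 3, 1, 1), nn.MaxPool2d(2), nn.ReLU())
con2 = nn.Sequential(nn.Conv2d(64, 128, 3, 1, 1), nn.MaxPool2d(2), nn.ReLU())

x = torch.randn(1, 3, 128, 128)          # one fake 128*128 RGB image
out = con2(con1(x))
print(out.shape)                         # torch.Size([1, 128, 32, 32])
print(out.view(out.size(0), -1).shape)   # torch.Size([1, 131072]) == 128*32*32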
The training procedure is unchanged; it is explained in 【神经网络 | 深度学习】pytorch快速搭建网络.
import torch
import torch.nn as nn
device = torch.device('cuda:0')
class CNNNET(nn.Module):
    def __init__(self):
        super(CNNNET, self).__init__()
        self.con1 = nn.Sequential(
            nn.Conv2d(3, 64, 3, 1, 1),   # 3*128*128 --> 64*128*128
            # 3 input channels, 64 output channels (i.e. 64 kernels), kernel size 3*3 or (3, 3),
            # stride 1, padding = 0.5 * (kernel_size - 1), e.g. kernel 3 --> padding 1
            nn.MaxPool2d(2),             # 64*128*128 --> 64*64*64
            # pooling layer
            nn.ReLU(),
        )
        self.con2 = nn.Sequential(
            nn.Conv2d(64, 128, 3, 1, 1), # 64*64*64 --> 128*64*64
            nn.MaxPool2d(2),             # 128*64*64 --> 128*32*32
            # pooling layer
            nn.ReLU(),
        )
        self.fc = nn.Sequential(
            nn.Linear(128*32*32, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 10)            # final classification into 10 classes
        )
        # loss function and optimizer used by train_ below
        # (MSELoss and Adam are assumed choices)
        self.mls = nn.MSELoss()
        self.opt = torch.optim.Adam(self.parameters(), lr=0.001)
    def forward(self, input):
        out = self.con1(input)
        out = self.con2(out)
        out = out.view(out.size(0), -1)
        # flatten the feature maps into a single row per sample
        out = self.fc(out)
        return out

    def train_(self, x, y):
        out = self.forward(x)
        loss = self.mls(out, y)
        print('loss', loss)
        self.opt.zero_grad()
        loss.backward()
        self.opt.step()

    def test(self, x):
        return self.forward(x)
cnn = CNNNET()
cnn.cuda()  # move the model to the GPU
cnn.cpu()   # move the model back to the CPU
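The train_ and test methods are not called anywhere above, so here is a minimal usage sketch that continues from the cnn object just created. The random dummy data, batch size, number of steps, and one-hot targets (needed because MSELoss is assumed above) are all illustrative assumptions, not part of the original article:
# minimal usage sketch with random dummy data (purely illustrative)
cnn.cuda()                                    # train on the GPU
for step in range(10):                        # number of steps is arbitrary here
    x = torch.randn(8, 3, 128, 128).cuda()    # a batch of 8 fake 128*128 RGB images
    labels = torch.randint(0, 10, (8,))       # fake class labels in [0, 10)
    y = torch.eye(10)[labels].cuda()          # one-hot targets, since MSELoss expects them
    cnn.train_(x, y)
pred = cnn.test(torch.randn(1, 3, 128, 128).cuda())
print(pred.argmax(dim=1))                     # predicted class index
cnn.cpu()                                     # move the model back to the CPU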