PyTorch Primer: AlexNet

Knowledge point 1: flatten the 2-D feature maps between the pooling layer and the fully connected layer
Knowledge point 2: the input format of a net
Knowledge point 3: converting an image into net input
Knowledge point 4: the format of output
Knowledge point 5: output.max(1)
Knowledge point 6: counting correct predictions
Knowledge point 7: the structure of a training loop (key point)
Knowledge point 8: torch.utils.data.DataLoader
Knowledge point 9: the inputs of the optimizer
Knowledge point 10: the object-oriented programming mindset
Knowledge point 11: the 5 ingredients of a training run

import torch
from torch import nn
import numpy as np
from torch.autograd import Variable
from torchvision.datasets import CIFAR10
from datetime import datetime

Knowledge point 1
Between the last pooling layer and the fully connected layers you need a view operation that flattens the 2-D feature maps into 1-D, while leaving the batch dimension untouched.
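
A minimal sketch of what this view does (demo tensor; shapes chosen for illustration):

x = torch.zeros(4, 64, 4, 4)   # batch_size 4, to be flattened to 64 * 4 * 4 = 1024
x = x.view(x.shape[0], -1)     # keep the batch dim, flatten the rest
print(x.shape)                 # torch.Size([4, 1024])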

class AlexNet(nn.Module):
	def __init__(self):
		super(AlexNet, self).__init__()
		self.conv1 = nn.Sequential(
			nn.Conv2d(3, 64, 5),
			nn.ReLU(True)                    # inplace=True saves memory, but can occasionally cause errors
		)
		self.max_pool1 = nn.MaxPool2d(3, 2)
		self.conv2 = nn.Sequential(
			nn.Conv2d(64, 64, 5),
			nn.ReLU(True)
		)
		self.max_pool2 = nn.MaxPool2d(3, 2)
		self.fc1 = nn.Sequential(
			nn.Linear(1024, 384),
			nn.ReLU(True)
		)
		self.fc2 = nn.Sequential(
			nn.Linear(384, 192),
			nn.ReLU(True)
		)
		self.fc3 = nn.Linear(192, 10)    # classification output: no ReLU here
	
	def forward(self, x):
		x = self.conv1(x)
		x = self.max_pool1(x)
		x = self.conv2(x)
		x = self.max_pool2(x)
		x = x.view(x.shape[0], -1)  # don't forget this: x has 4 dims (batch_size, channels, height, width); keep the batch dim and flatten the rest
		x = self.fc1(x)
		x = self.fc2(x)
		x = self.fc3(x)
		return x
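
Why does fc1 take 1024 input features? For a 32x32 CIFAR10 image: 32 -> 28 (conv1, 5x5, no padding) -> 13 (max_pool1, kernel 3, stride 2) -> 9 (conv2) -> 4 (max_pool2), leaving 64 * 4 * 4 = 1024 values per image. A quick check (net_check and feat are just demo names):

# run only the convolutional part and inspect the shape that fc1 will receive
net_check = AlexNet()
feat = torch.zeros(1, 3, 32, 32)
feat = net_check.max_pool2(net_check.conv2(net_check.max_pool1(net_check.conv1(feat))))
print(feat.shape)  # torch.Size([1, 64, 4, 4]) -> 1024 features after view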

Knowledge point 2
A net defined with nn.Module expects its input to be a tensor of shape (batch_size, channels, height, width).

# quick sanity check that AlexNet runs
alexnet = AlexNet()
input_demo = Variable(torch.zeros(1, 3, 32, 32))  # net input must be a 4-D tensor: batch_size, channels, height, width
output_demo = alexnet(input_demo)
print(output_demo)
print(output_demo.shape)

Knowledge point 3
Basic preprocessing to remember: the input is an image, so convert it to a numpy array, and use float so it can be normalized afterwards.

def data_tf(x):
	x = np.array(x, dtype='float32') / 255
	x = (x - 0.5) / 0.5
	x = x.transpose((2, 0, 1))  # axis transpose for PyTorch: (height, width, channels) -> (channels, height, width)
	x = torch.from_numpy(x)
	return x
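
A quick way to check data_tf, using a dummy image (demo names; the 32x32x3 shape mirrors a CIFAR10 image):

demo_img = np.zeros((32, 32, 3), dtype='uint8')  # (height, width, channels)
demo_tensor = data_tf(demo_img)
print(demo_tensor.shape, demo_tensor.dtype)      # torch.Size([3, 32, 32]) torch.float32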

Knowledge point 4
output has 2 dimensions: (batch_size, scores)
Knowledge point 5
output.max(1) returns each row's maximum value and its index
Knowledge point 6
(pred_label == label).sum().data.item() returns the number of correct predictions; remember it

def get_acc(output, label):
	total = output.shape[0] # output has 2 dims: (batch_size, scores)
	_, pred_label = output.max(1)
	return (pred_label == label).sum().data.item() / total
	# return (output == label).sum() / len(output)
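
To see knowledge points 5 and 6 in action, a toy batch of 3 samples and 4 classes (demo names; scores made up for illustration):

demo_output = torch.tensor([[0.1, 2.0, 0.3, 0.4],
                            [1.5, 0.2, 0.1, 0.0],
                            [0.0, 0.1, 0.2, 3.0]])
demo_label = torch.tensor([1, 0, 2])
values, pred_label = demo_output.max(1)   # per-row max value and its index
print(pred_label)                         # tensor([1, 0, 3])
print(get_acc(demo_output, demo_label))   # 2 of 3 correct -> 0.666...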

Knowledge point 7 (key point)

The structure of a training loop:

  1. Move the network to the GPU; record time 1
  2. Loop over epochs
    ______________ train
    2.1 Set up containers for train loss and accuracy
    2.2 Switch to training mode
    2.3 Loop over the train data
      2.3.1 Move data and label to the GPU
      2.3.2 output = net(data)
      2.3.3 Compute the loss; zero_grad, backward(), step()
      2.3.4 Accumulate this batch's loss and accuracy
    2.4 Record time 2
    2.5 Compute the elapsed time
    ______________ predict
    2.6 Set up containers for valid loss and accuracy
    2.7 Switch to eval mode
    2.8 Loop over the valid data
      2.8.1 Move data and label to the GPU
      2.8.2 output = net(data)
      2.8.3 Compute the loss
      2.8.4 Accumulate this batch's loss and accuracy
    2.9 Record time 3
    2.10 Print a summary

def train(net, train_data, valid_data, num_epochs, optimizer, criterion):
	if torch.cuda.is_available():
		net = net.cuda()
	prev_time = datetime.now()
	for epoch in range(num_epochs):
		train_loss = 0
		train_acc = 0
		net = net.train()
		for im, label in train_data:
			if torch.cuda.is_available():
				im = Variable(im.cuda())
				label = Variable(label.cuda())
			else:
				im = Variable(im)
				label = Variable(label)
			output = net(im)
			loss = criterion(output, label)
			optimizer.zero_grad()
			loss.backward()
			optimizer.step()
			train_loss += loss.data.item()
			train_acc += get_acc(output, label)
		cur_time = datetime.now()
		hours, remainder = divmod((cur_time - prev_time).seconds, 3600)
		minutes, seconds = divmod(remainder, 60)
		time_str = "time %02d:%02d:%02d" % (hours, minutes, seconds)
		if valid_data is not None:
			valid_loss = 0
			valid_acc = 0
			net.eval()
			for im, label in valid_data:
				if torch.cuda.is_available():
					im = Variable(im.cuda())
					label = Variable(label.cuda())
				else:
					im = Variable(im)
					label = Variable(label)
				output = net(im)
				loss = criterion(output, label)
				valid_loss += loss.data.item()
				valid_acc += get_acc(output, label)
			epoch_str = (
				"Epoch %d. Train Loss: %f, Train Acc: %f, Valid Loss: %f, Valid Acc: %f, "
				% (epoch, train_loss / len(train_data), train_acc / len(train_data),
				   valid_loss / len(valid_data), valid_acc / len(valid_data))
			)
		else:
			epoch_str = (
				"Epoch %d. Train Loss: %f, Train Acc: %f, "
				% (epoch, train_loss / len(train_data), train_acc / len(train_data))
			)
		prev_time = cur_time
		print(epoch_str + time_str)
				

Knowledge point 8
Remember to wrap the loaded dataset with torch.utils.data.DataLoader.
Knowledge point 9
torch.optim.SGD() takes 2 essentials: the network's parameters and the learning rate.
Knowledge point 10
The object-oriented programming mindset.
Knowledge point 11
The 5 ingredients of a training run: train_data, test_data, epochs, optimizer, criterion.

from torch.utils.data import DataLoader
train_set = CIFAR10('./data', train=True, transform=data_tf, download=True)
train_data = DataLoader(train_set, batch_size=64, shuffle=True)
test_set = CIFAR10('./data', train=False, transform=data_tf, download=True)
test_data = DataLoader(test_set, batch_size=128, shuffle=False)
net = AlexNet()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=1e-1)
epochs = 20
train(net, train_data, test_data, epochs, optimizer, criterion)       # object-oriented style: the whole run is one call
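
On knowledge point 8: DataLoader yields (image, label) batches; a quick peek at one batch (demo names; shapes follow batch_size=64 above):

im_demo, label_demo = next(iter(train_data))
print(im_demo.shape)     # torch.Size([64, 3, 32, 32])
print(label_demo.shape)  # torch.Size([64])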
