I'm new to PyTorch. I've worked through 莫烦's tutorials and others, but as an introduction to neural networks I sincerely recommend 刘二大人's course: it gives beginners a much clearer picture of how neural networks work, and the line-by-line code walkthroughs are really, really, really good. I got a lot out of it!
Below are my study notes, kept for my own future review.
1. in_size = x.size(0)  # x has shape (B, C, W, H); this grabs the batch size B
2. torch.cat(outputs, dim=1)  # each output has shape [B, C, W, H], so dim=1 concatenates along the channel dimension C
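A minimal sketch illustrating both notes (the shapes here are made up for illustration):
import torch

x = torch.randn(8, 3, 32, 32)   # (B, C, W, H)
in_size = x.size(0)             # batch size B = 8

a = torch.randn(8, 16, 32, 32)
b = torch.randn(8, 24, 32, 32)
out = torch.cat([a, b], dim=1)  # channels add up: shape (8, 40, 32, 32)
print(in_size, out.shape)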
Reference on visualizing activation functions:
https://dashee87.github.io/deep%20learning/visualising-activation-functions-in-neural-networks/
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
## Step 1, option 1: write a custom Dataset
class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        pass  # read the file and store the x/y tensors here

    def __getitem__(self, index):
        pass  # return the (x, y) pair at this index

    def __len__(self):
        pass  # return the number of samples
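A fleshed-out version of this skeleton, as a sketch assuming a comma-separated numeric file whose last column is the label (the layout of the course's diabetes CSV):

class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]                        # number of rows = number of samples
        self.x_data = torch.from_numpy(xy[:, :-1])    # every column except the last
        self.y_data = torch.from_numpy(xy[:, [-1]])   # last column, kept as an (N, 1) matrix

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len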
## Option 2: load a ready-made dataset; no manual splitting needed
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
train_dataset = datasets.MNIST(root='../dataset/mnist',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)
## Step 2: wrap the dataset in a DataLoader
dataset = DiabetesDataset(filepath)  # filepath: path to your data file
train_loader = DataLoader(dataset=dataset,
                          batch_size=32,
                          shuffle=True,
                          num_workers=3)
## Step 3: train with mini-batch iteration
if __name__ == '__main__':
    for epoch in range(100):
        for i, (inputs, labels) in enumerate(train_loader, 0):
            pass  # forward, backward, update; see the sketch below
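A sketch of what typically goes in that inner loop, assuming a model, criterion, and optimizer have already been defined:

if __name__ == '__main__':
    for epoch in range(100):
        for i, (inputs, labels) in enumerate(train_loader, 0):
            y_pred = model(inputs)            # forward pass
            loss = criterion(y_pred, labels)  # compute the loss
            optimizer.zero_grad()             # clear stale gradients
            loss.backward()                   # backward pass
            optimizer.step()                  # update the weights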
3. A look at the MNIST dataset
# torchvision.datasets ships with many standard datasets
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
import matplotlib.pyplot as plt
batch_size = 1
train_dataset = datasets.MNIST(root='../dataset/mnist',
                               train=True,
                               download=True,
                               transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset,
                          shuffle=False,
                          batch_size=batch_size)
for data, label in train_loader:
    out = data.squeeze().numpy()  # (1, 1, 28, 28) -> (28, 28) for plotting
    plt.imshow(out, cmap='gray')
    print('data:{} label:{} out:{}'.format(data.shape, label.shape, out.shape))
    plt.show()
    break  # look at the first sample only; remove this to step through them all
# 1. Implemented in numpy (softmax + cross-entropy by hand)
import numpy as np
y=np.array([1,0,0])
z=np.array([0.2,0.1,0.1])
y_pred = np.exp(z)/np.exp(z).sum()
loss = (-y * np.log(y_pred)).sum()
print(loss)
# 2. The same with torch.nn.CrossEntropyLoss
import torch
criterion = torch.nn.CrossEntropyLoss()
Y = torch.LongTensor([2, 0, 1])
Y_pred1 = torch.Tensor([[0.1, 0.2, 0.9],
                        [1.1, 0.1, 0.2],
                        [0.2, 2.1, 0.1]])
Y_pred2 = torch.Tensor([[0.8, 0.2, 0.3],
                        [0.2, 0.3, 0.5],
                        [0.2, 0.2, 0.5]])
l1 = criterion(Y_pred1, Y)
l2 = criterion(Y_pred2, Y)
print("Batch Loss1 = ", l1.data, "\nBatch Loss2 = ", l2.data)
# MNIST multi-class (one-hot) classification using fully connected Linear layers
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
    # Normalize shifts the pixel values to the given mean and std, which makes the
    # model easier to train. MNIST is single-channel, so one value per tuple (note
    # the trailing commas: mean and std are passed as sequences, one value per
    # channel); RGB images need three means and three stds.
])
train_dataset = datasets.MNIST(root='../dataset/mnist/',
                               train=True,
                               download=True,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/',
                              train=False,
                              download=True,
                              transform=transform)
test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)

    def forward(self, x):
        x = x.view(-1, 784)  # flatten (B, 1, 28, 28) into (B, 784)
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return self.l5(x)  # no activation here: CrossEntropyLoss applies log-softmax itself

model = Net()
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        optimizer.zero_grad()
        outputs = model(inputs)  # outputs: 64x10, one row of class scores per image in the batch
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:  # report the average loss every 300 batches
            print('[%d,%5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0
def test():
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)  # index of the max score = predicted class
            total += labels.size(0)  # each batch contributes up to 64 samples
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %%' % (100 * correct / total))
if __name__ == "__main__":
    for epoch in range(10):
        train(epoch)  # wrapped in functions, so the main loop stays easy to modify
        test()
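For reference, torch.max(..., dim=1) returns both the row-wise maxima and their indices; test() above keeps only the indices, which are the predicted classes. A tiny standalone check:

import torch

outputs = torch.Tensor([[0.1, 2.0, 0.3],
                        [1.5, 0.2, 0.1]])
values, predicted = torch.max(outputs, dim=1)
print(values)     # tensor([2.0000, 1.5000])
print(predicted)  # tensor([1, 0])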
Why move on from this model:
1. It uses only fully connected layers; the weights alone are not enough to capture image structure.
2. For images we care about high-level, abstract features, but here the network only sees low-level numeric features (the raw pixels from ToTensor).
3. Feature extraction: manual methods are the Fourier transform (weak for non-periodic signals) and wavelets; the automatic method is a CNN.
Side note on camera imaging: a camera may use interpolation to inflate its claimed pixel count; using three kinds of photoresistors, one per color, yields RGB.
In the example below, bias is omitted (bias=False), so the result is exactly one feature map.
Initialize the kernel weights by hand:
import torch

input = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         1, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]
input = torch.Tensor(input).view(1, 1, 5, 5)  # B x C x H x W
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)  # in_channels, out_channels, kernel
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)  # out_channels x in_channels x kH x kW
conv_layer.weight.data = kernel.data
output = conv_layer(input)
print(output)
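With kernel_size=3 and padding=1 the 5x5 spatial size is preserved; changing the stride shrinks the map. A quick sketch of the shape arithmetic on the same input:

print(output.shape)  # torch.Size([1, 1, 5, 5]): padding=1 keeps 5x5

conv_s2 = torch.nn.Conv2d(1, 1, kernel_size=3, stride=2, bias=False)
print(conv_s2(input).shape)  # torch.Size([1, 1, 2, 2]): floor((5-3)/2)+1 = 2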
For a complex network, first wrap the repeated structures into modules (Inception blocks, residual blocks), then call them:
outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
return torch.cat(outputs, dim=1)  # each branch outputs [B, C, W, H]; dim=1 concatenates along the channel dimension C
import torch
import torch.nn as nn
import torch.nn.functional as F

class InceptionA(torch.nn.Module):
    # Every branch must keep B, W, H unchanged; only C may differ.
    # Here the concatenated output has C = 24*3 + 16 = 88 channels.
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)
        self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)  # the 5x5 branch has two steps
        self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
        self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)
        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        return torch.cat(outputs, dim=1)
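A quick sanity check that the channel counts add up to 16 + 24 + 24 + 24 = 88 while B, W, H stay the same (input shape chosen arbitrarily):

x = torch.randn(1, 10, 28, 28)  # B=1, C=10, 28x28
block = InceptionA(in_channels=10)
print(block(x).shape)           # torch.Size([1, 88, 28, 28])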
cell = torch.nn.RNNCell(input_size=input_size, hidden_size=hidden_size)
hidden = cell(input, hidden)  # call the cell instance: h1 = cell(x1, h0)
# x1 has shape [batch, input_size]
# hidden has shape [batch, hidden_size]
# The dataset is shaped (seq_len, batch, input_size); iterating over it yields one
# (batch, input_size) slice per time step, which is fed to the cell step by step.
import torch
import torch.nn as nn

batch_size = 1
seq_len = 3
input_size = 4
hidden_size = 2
cell = torch.nn.RNNCell(input_size=input_size, hidden_size=hidden_size)
dataset = torch.randn(seq_len, batch_size, input_size)
hidden = torch.zeros(batch_size, hidden_size)  # h0 starts at zero
for idx, input in enumerate(dataset):
    print('=' * 20, idx, '=' * 20)
    print(input)
    print('input size: ', input.shape)
    hidden = cell(input, hidden)
    print('output size: ', hidden.shape)
    print(hidden)
print('=' * 20, '=' * 20)
print(dataset)
print(dataset.shape)
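Sample output: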
==================== 0 ====================
tensor([[-1.6088, -0.9138, -1.4379, 0.4623]])
input size: torch.Size([1, 4])
output size: torch.Size([1, 2])
tensor([[-0.3658, -0.9393]], grad_fn=<TanhBackward>)
==================== 1 ====================
tensor([[ 0.5904, -1.4630, 0.3164, 1.9257]])
input size: torch.Size([1, 4])
output size: torch.Size([1, 2])
tensor([[ 0.1977, -0.9414]], grad_fn=<TanhBackward>)
==================== 2 ====================
tensor([[ 0.3129, -0.9360, -0.0654, -0.2440]])
input size: torch.Size([1, 4])
output size: torch.Size([1, 2])
tensor([[-0.0490, -0.6150]], grad_fn=<TanhBackward>)
==================== ====================
tensor([[[-1.6088, -0.9138, -1.4379, 0.4623]],
[[ 0.5904, -1.4630, 0.3164, 1.9257]],
[[ 0.3129, -0.9360, -0.0654, -0.2440]]])
torch.Size([3, 1, 4])