PyTorch study notes from Bilibili (lectures by 刘二大人)

Algorithms covered: greedy, exhaustive search, divide and conquer, dynamic programming
visdom: a visualization tool
np.meshgrid() is used for 3D plots (see the sketch after this list)
If training fails (the loss blows up), the learning rate may be too large
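
A minimal sketch (not from the lecture code itself): np.meshgrid builds the (w, b) grid for plotting a 3D loss surface of the linear model y = w * x + b on the same toy data used below.

import numpy as np
import matplotlib.pyplot as plt

w = np.arange(0.0, 4.1, 0.1)
b = np.arange(-2.0, 2.1, 0.1)
W, B = np.meshgrid(w, b)  # 2D coordinate matrices over the (w, b) plane
MSE = ((W * 1.0 + B - 2.0) ** 2 + (W * 2.0 + B - 4.0) ** 2 + (W * 3.0 + B - 6.0) ** 2) / 3

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(W, B, MSE)
ax.set_xlabel('w')
ax.set_ylabel('b')
ax.set_zlabel('MSE')
plt.show()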

Linear model

import numpy as np
import matplotlib.pyplot as plt

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

def forward(x):
    return x * w

def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) * (y_pred - y)

w_list = []
mse_list = []
for w in np.arange(0.0, 4.1, 0.1):  # exhaustive search over candidate weights
    print('w=', w)
    l_sum = 0
    for x_val, y_val in zip(x_data, y_data):
        y_pred_val = forward(x_val)
        loss_val = loss(x_val, y_val)
        l_sum += loss_val
        print('\t', x_val, y_val, y_pred_val, loss_val)
    print('MSE=', l_sum / 3)
    w_list.append(w)
    mse_list.append(l_sum / 3)

plt.plot(w_list, mse_list)
plt.ylabel('Loss')
plt.xlabel('w')
plt.show()

Gradient descent

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0

def forward(x):
    return x * w

def cost(xs, ys):
    cost = 0
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        cost += (y_pred - y) ** 2
    return cost / len(xs)

def gradient(xs, ys):
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)  # analytic derivative of (x*w - y)^2 with respect to w
    return grad / len(xs)

print('predict (before training)', 4, forward(4))
for epoch in range(100):
    cost_val = cost(x_data, y_data)
    grad_val = gradient(x_data, y_data)
    w -= 0.01 * grad_val
    print('epoch:', epoch, 'w=', w, 'loss=', cost_val)
print('predict (after training)', 4, forward(4))

Stochastic gradient descent: update on the loss of a single sample instead of the average over all samples. Two motivations: (1) the added noise can help training; (2) averaging over a large dataset is computationally expensive.

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0

def forward(x):
    return x * w

def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) ** 2

def gradient(x, y):
    return 2 * x * (x * w - y)

print('predict (before training)', 4, forward(4))
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        grad = gradient(x, y)
        w -= 0.01 * grad  # update immediately after every sample: cannot be parallelized; mini-batches are the compromise
        print('\tgrad:', x, y, grad)
        l = loss(x, y)
    print("progress:", epoch, "w=", w, "loss", l)
print('predict (after training)', 4, forward(4))

Backpropagation

import torch

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = torch.Tensor([1.0])
w.requires_grad = True  # gradients must be computed for w

def forward(x):
    return x * w  # x * w is a tensor; the multiplication is recorded in the computation graph
    
def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) ** 2

print('predict (before training)', 4, forward(4).item())
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)  # forward pass: builds the graph and computes the loss
        l.backward()  # backward pass; the computation graph is freed once it finishes
        print('\tgrad:', x, y, w.grad.item())  # .item() extracts the number; keeping tensors would keep the graph growing
        w.data -= 0.01 * w.grad.data  # update via .data so the step itself is not recorded in a graph

        w.grad.data.zero_()  # gradients accumulate unless explicitly zeroed
    print("progress:", epoch, l.item())
print('predict (after training)', 4, forward(4).item())

Linear regression with PyTorch

import torch

x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])  # shape 3x1: three samples with one feature each

class LinearModel(torch.nn.Module):  # a template worth memorizing
    def __init__(self):  # constructor
        super(LinearModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # holds the weight and bias; the arguments are the input and output feature dimensions

    def forward(self, x):  # nn.Module builds the graph, so backward comes for free
        y_pred = self.linear(x)
        return y_pred

model = LinearModel()  # instantiate; the model object is callable

criterion = torch.nn.MSELoss(reduction='sum')  # size_average=False is the deprecated spelling of reduction='sum'
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # the optimizer builds no computation graph; it only holds references to the parameters
 # alternatives: Adagrad, Adam, Adamax, ASGD, LBFGS, RMSprop, Rprop
for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss)  # printing calls __str__ automatically; it does not extend the computation graph
    optimizer.zero_grad()  # zero the gradients
    loss.backward()  # backward pass
    optimizer.step()  # update the weights

print('w=', model.linear.weight.item())
print('b=', model.linear.bias.item())
x_test = torch.Tensor([[4.0]])
y_test = model(x_test)
print('y_pred=', y_test.data)

Logistic regression: the sigmoid σ squashes the output into (0, 1)

import torch
import torch.nn.functional as F
# import torchvision
# train_set = torchvision.datasets.MNIST(root='../dataset/mnist', train=True, download=True)
# test_set = torchvision.datasets.MNIST(root='../dataset/mnist', train=False, download=True)

x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[0], [0], [1]])  # changed: labels are now binary

class LogisticRegressionModel(torch.nn.Module):
    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_pred = torch.sigmoid(self.linear(x))  # changed: apply sigmoid (F.sigmoid is deprecated in favor of torch.sigmoid)
        return y_pred

model = LogisticRegressionModel()

criterion = torch.nn.BCELoss(reduction='sum')  # changed: binary cross-entropy loss
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

Multiple input features

import numpy as np
import torch

xy = np.loadtxt('diabetes.csv.gz', delimiter=',', dtype=np.float32)
x_data = torch.from_numpy(xy[:, :-1])  # all rows, every column except the last
y_data = torch.from_numpy(xy[:, [-1]])  # all rows, only the last column; [-1] keeps it a 2D (n x 1) matrix
# x_data = torch.Tensor([[1.0], [2.0], [3.0]])
# y_data = torch.Tensor([[0], [0], [1]])
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.activate = torch.nn.Sigmoid()  # unlike the functional sigmoid, this is an nn module; it has no parameters, so one shared instance is enough
         # self.activate = torch.nn.ReLU()  # with ReLU, it is best to keep a sigmoid on the last layer so the output is a smooth probability
    def forward(self, x):
        x = self.activate(self.linear1(x))
        x = self.activate(self.linear2(x))
        x = self.activate(self.linear3(x))
        return x

Loading datasets (Dataset and DataLoader)

import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader

class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]  # xy has n rows and 9 columns; shape[0] gives n
        self.x_data = torch.from_numpy(xy[:, :-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):  # enables indexing: dataset[index]
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len  # enables len(dataset)

dataset = DiabetesDataset('diabetes.csv.gz')
train_loader = DataLoader(dataset=dataset,
                          batch_size=32,
                          shuffle=True,
                          num_workers=2)

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.sigmoid = torch.nn.Sigmoid()  # an nn module with no parameters; one shared instance is enough
         # self.activate = torch.nn.ReLU()
    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x

model = Model()

criterion = torch.nn.BCELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

if __name__ == '__main__':  # guard required because num_workers spawns subprocesses (notably on Windows)
    for epoch in range(100):
        for i, data in enumerate(train_loader, 0):  # i counts batches starting from 0; the DataLoader yields tensors automatically
            inputs, labels = data
            # for i, (inputs, labels) in enumerate()
            y_pred = model(inputs)
            loss = criterion(y_pred, labels)
            print(epoch, loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

Multiclass classification

torch.nn.CrossEntropyLoss = LogSoftmax + NLLLoss
Cross-entropy loss: do not apply a nonlinearity on the last layer; the softmax is folded into the loss, which compares the logits with the (internally one-hot encoded) label. The label must be a LongTensor.

import numpy as np

y = np.array([1, 0, 0])
z = np.array([0.2, 0.1, -0.1])
y_pred = np.exp(z) / np.exp(z).sum()
loss = (- y * np.log(y_pred)).sum()
print(loss)
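
The same computation with torch.nn.CrossEntropyLoss (a quick check, not in the original notes): it takes raw logits, so the model applies no softmax, and the target is a class index as a LongTensor rather than a one-hot vector.

import torch

z = torch.tensor([[0.2, 0.1, -0.1]])  # logits, shape (batch, classes)
y = torch.LongTensor([0])  # class index corresponding to one-hot [1, 0, 0]
criterion = torch.nn.CrossEntropyLoss()
print(criterion(z, y))  # matches the numpy result above (about 0.9729)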
import torch
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

batch_size = 64
transform = transforms.Compose([  # naming this "transforms" would shadow the module
    transforms.ToTensor(),  # convert the PIL Image (H x W) to a C x H x W tensor with values in [0, 1]
    transforms.Normalize((0.1307, ), (0.3081, ))  # normalize with the MNIST mean and std; roughly standard-normal inputs train better
])

train_dataset = datasets.MNIST(root='../dataset/mnist/',
                               train=True,
                               download=True,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)

test_dataset = datasets.MNIST(root='../dataset/mnist/',
                               train=False,
                               download=True,
                               transform=transform)
test_loader = DataLoader(test_dataset,
                          shuffle=False,
                          batch_size=batch_size)

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)
        
    def forward(self, x):
        x = x.view(-1, 784)  # flatten each 28x28 image into a 784-dim row; -1 infers the batch size
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return self.l5(x)
    
model = Net()

criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

def train(epoch):
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader, 0):
        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        
        running_loss += loss.item()
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))

def test():
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients are needed during testing
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            _, predicted = torch.max(outputs.data, dim=1)  # along dim=1 (across classes): returns (max value, index of max)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set: %d %%' % (100 * correct / total))
    
if __name__=='__main__':
    for epoch in range(10):
        train(epoch)
        test()

Convolutional neural networks

import torch
in_channels, out_channels = 5, 10
width, height = 100, 100
kernel_size = 3
batch_size = 1

in_put = torch.randn(batch_size,
                      in_channels,
                      width,
                      height)
# input = [3, 4, 5, 6, 7,
#          2, 4, 6, 8, 2,
#          3, 5, 6, 6, 2,
#          3, 5, 6, 6, 4,
#          4, 8, 6, 4, 1]
# input = torch.Tensor(input).view(1, 1, 5, 5)  # N C H W
conv_layer = torch.nn.Conv2d(in_channels,
                             out_channels,
                             kernel_size=kernel_size)  # padding=1, bias=False
# maxpooling_layer = torch.nn.MaxPool2d(kernel_size=2)  # 2x2 window (stride defaults to the kernel size), takes the max
# kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)  # reshape the kernel to (out_channels, in_channels, H, W)
# conv_layer.weight.data = kernel.data
output = conv_layer(in_put)
print(in_put.shape)
print(output.shape)
print(conv_layer.weight.shape)

GPU: move the model first
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
Then move the inputs and targets during training:
inputs, target = inputs.to(device), target.to(device)
and likewise during testing:
inputs, target = inputs.to(device), target.to(device)
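
Putting the three steps together, a minimal sketch that reuses the model, criterion, optimizer and train_loader from the multiclass section above:

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)  # move parameters and buffers before training

def train(epoch):
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader, 0):
        inputs, target = inputs.to(device), target.to(device)  # move each batch to the same device
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()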

Advanced CNNs

GoogLeNet (parallel branches with differently shaped kernels)
A 1x1 convolution changes the number of channels; used as a bottleneck it noticeably reduces the amount of computation (see the arithmetic below).
Layer-wise pretraining (train a layer, freeze it, then train the next) was one way to fight vanishing gradients, but it is cumbersome. Skip connections are simpler: with H(x) = F(x) + x the local derivative becomes (gradient + 1), so it stays away from zero and the gradient does not vanish.
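
The operation-count example from the lecture: a 28x28 feature map going from 192 to 32 channels, comparing a direct 5x5 convolution with a 1x1 bottleneck (quick arithmetic sketch):

H = W = 28
direct = 5 * 5 * H * W * 192 * 32  # single 5x5 conv: 120,422,400 multiplications
bottleneck = (1 * 1 * H * W * 192 * 16  # 1x1 conv down to 16 channels
              + 5 * 5 * H * W * 16 * 32)  # then the 5x5 conv: 12,443,648 in total
print(direct, bottleneck, direct / bottleneck)  # roughly a 10x reduction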

import torch
import torch.nn as nn
import torch.nn.functional as F

class InceptionA(nn.Module):
    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        self.branch1x1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        
        self.branch5x5_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = nn.Conv2d(16, 24, kernel_size=5, padding=2)
        
        self.branch3x3_1 = nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = nn.Conv2d(24, 24, kernel_size=3, padding=1)
        
        self.branch_pool = nn.Conv2d(in_channels, 24, kernel_size=1)
        
    def forward(self, x):
        branch1x1 = self.branch1x1(x)
        
        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)
        
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)
        branch3x3 = self.branch3x3_3(branch3x3)
        
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        
        outputs = [branch1x1, branch5x5, branch3x3, branch_pool]
        return torch.cat(outputs, dim=1)  # concatenate along the channel dimension: 16 + 24 + 24 + 24 = 88

The paper "Identity Mappings in Deep Residual Networks" explores many residual-block designs.
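
A minimal sketch of the basic residual block discussed above (3x3 convolutions with the channel count preserved, so x and F(x) can be added):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)  # H(x) = F(x) + x: the skip connection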

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(88, 20, kernel_size=5)  # 88 input channels = the output of InceptionA
        
        self.incep1 = InceptionA(in_channels=10)
        self.incep2 = InceptionA(in_channels=20)
        
        self.mp = nn.MaxPool2d(2)
        self.fc = nn.Linear(1408, 10)
        
    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = self.incep1(x)
        x = F.relu(self.mp(self.conv2(x)))
        x = self.incep2(x)
        x = x.view(in_size, -1)
        x = self.fc(x)
        return x

1. Theory: study the fundamentals of deep learning (e.g. the book "Deep Learning").
2. Read the PyTorch documentation, at least once from start to finish.
3. Reproduce classic work: read the code, then try to write it yourself. Getting it to run only proves the environment is configured.
4. Broaden your horizons.

Recurrent neural networks
(dense/deep layers = fully connected)

import torch

batch_size = 1
seq_len = 3
input_size = 4
hidden_size = 2
num_layers = 1

cell = torch.nn.RNN(input_size=input_size, hidden_size=hidden_size,
                    num_layers=num_layers)

inputs = torch.randn(seq_len, batch_size, input_size)
hidden = torch.zeros(num_layers, batch_size, hidden_size)

# torch.nn.RNN processes the whole sequence in a single call, so no explicit
# loop over time steps is needed (that is what RNNCell would be for)
out, hidden = cell(inputs, hidden)

print('Output size:', out.shape)  # (seq_len, batch_size, hidden_size)
print(out)
print('Hidden size:', hidden.shape)  # (num_layers, batch_size, hidden_size)
print(hidden)

batch_first=True puts the batch dimension first, which is convenient when attaching a linear layer afterwards.
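
A quick shape check (illustrative sizes, not from the lecture):

import torch

rnn = torch.nn.RNN(input_size=4, hidden_size=8, num_layers=1, batch_first=True)
x = torch.randn(2, 3, 4)  # (batch, seq_len, input_size)
out, h = rnn(x)
print(out.shape)  # torch.Size([2, 3, 8]): (batch, seq_len, hidden_size)
fc = torch.nn.Linear(8, 4)
print(fc(out).shape)  # torch.Size([2, 3, 4]): the linear layer applies directly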

import torch

input_size = 4
hidden_size = 4  # hidden is used directly as the logits, so it must equal the number of characters
batch_size = 1

idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # "hello"
y_data = [3, 1, 2, 3, 2]  # "ohlol"

one_hot_lookup = [[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]]
x_one_hot = [one_hot_lookup[x] for x in x_data]

inputs = torch.Tensor(x_one_hot).view(-1, batch_size, input_size)
labels = torch.LongTensor(y_data).view(-1, 1)

class Model(torch.nn.Module):
    def __init__(self, input_size, hidden_size, batch_size):
        super(Model, self).__init__()
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.rnncell = torch.nn.RNNCell(input_size=self.input_size,
                                        hidden_size=self.hidden_size)

    def forward(self, input, hidden):
        hidden = self.rnncell(input, hidden)
        return hidden

    def init_hidden(self):
        return torch.zeros(self.batch_size, self.hidden_size)

net = Model(input_size, hidden_size, batch_size)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.05)

for epoch in range(15):
    loss = 0
    optimizer.zero_grad()
    hidden = net.init_hidden()
    print('predicted string:', end='')
    for input, label in zip(inputs, labels):
        hidden = net(input, hidden)
        loss += criterion(hidden, label)
        _, idx = hidden.max(dim=1)  # index of the max = the predicted character
        print(idx2char[idx.item()], end='')
    loss.backward()
    optimizer.step()
    print(', epoch [%d/15] loss=%.4f' % (epoch + 1, loss.item()))

RNN classifier (classifying names by country)

import csv
import gzip
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pack_padded_sequence

class NameDataset(Dataset):
    def __init__(self, is_train_set=True):
        filename = 'data/names_train.csv.gz' if is_train_set else 'data/names_test.csv.gz'
        with gzip.open(filename, 'rt') as f:  # use the matching library to read the compressed file
            reader = csv.reader(f)
            rows = list(reader)
        self.names = [row[0] for row in rows]  # the first element of each row
        self.len = len(self.names)
        self.countries = [row[1] for row in rows]
        self.country_list = list(sorted(set(self.countries)))  # set removes duplicates, sorted() orders them, then back to a list
        self.country_dict = self.getCountryDict()  # map country name -> index
        self.country_num = len(self.country_list)

    def __getitem__(self, index):
        return self.names[index], self.country_dict[self.countries[index]]

    def __len__(self):
        return self.len

    def getCountryDict(self):
        country_dict = dict()
        for idx, country_name in enumerate(self.country_list, 0):
            country_dict[country_name] = idx
        return country_dict

    def idx2country(self, index):  # return the country name for a given index
        return self.country_list[index]

    def getCountriesNum(self):  # return the number of countries
        return self.country_num

HIDDEN_SIZE = 100
BATCH_SIZE = 256
N_LAYER = 2
N_EPOCHS = 100
N_CHARS = 128  # size of the ASCII alphabet
USE_GPU = False

trainset = NameDataset(is_train_set=True)
trainloader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
testset = NameDataset(is_train_set=False)
testloader = DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False)

N_COUNTRY = trainset.getCountriesNum()

class RNNClassifier(torch.nn.Module):
    def __init__(self, input_size, hidden_size, output_size, n_layers=1, bidirectional=True):
        super(RNNClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.n_directions = 2 if bidirectional else 1

        self.embedding = torch.nn.Embedding(input_size, hidden_size)
        self.gru = torch.nn.GRU(hidden_size, hidden_size, n_layers, bidirectional=bidirectional)
        self.fc = torch.nn.Linear(hidden_size * self.n_directions, output_size)

    def __init_hidden(self, batch_size):
        hidden = torch.zeros(self.n_layers * self.n_directions,
                             batch_size, self.hidden_size)
        return create_tensor(hidden)

    def forward(self, input, seq_lengths):
        input = input.t()  # transpose from (batch, seq_len) to (seq_len, batch)
        batch_size = input.size(1)

        hidden = self.__init_hidden(batch_size)
        embedding = self.embedding(input)  # (seq_len, batch) -> (seq_len, batch, hidden_size)

        gru_input = pack_padded_sequence(embedding, seq_lengths)  # pack so the GRU skips the zero padding

        output, hidden = self.gru(gru_input, hidden)
        if self.n_directions == 2:
            hidden_cat = torch.cat([hidden[-1], hidden[-2]], dim=1)  # concat the top layer's forward and backward hidden states
        else:
            hidden_cat = hidden[-1]
        fc_output = self.fc(hidden_cat)
        return fc_output
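
A small illustration of pack_padded_sequence (made-up values): padding is dropped and the data is stored time-major, which is why make_tensors below sorts the sequences by length in descending order.

import torch
from torch.nn.utils.rnn import pack_padded_sequence

padded = torch.tensor([[1., 2., 3.],
                       [4., 5., 0.]]).unsqueeze(-1)  # (batch=2, seq_len=3, feature=1); 0 is padding
lengths = torch.tensor([3, 2])  # true lengths, already descending
packed = pack_padded_sequence(padded, lengths, batch_first=True)
print(packed.data.squeeze(-1))  # tensor([1., 4., 2., 5., 3.]) - time-major, padding gone
print(packed.batch_sizes)  # tensor([2, 2, 1])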

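The notes call name2list and create_tensor without defining them; roughly, following the lecture, they look like this:

def name2list(name):
    # a name becomes a list of ASCII codes plus its length (N_CHARS = 128 covers ASCII)
    arr = [ord(c) for c in name]
    return arr, len(arr)

def create_tensor(tensor):
    # move the tensor to the GPU when USE_GPU is set
    if USE_GPU:
        device = torch.device("cuda:0")
        tensor = tensor.to(device)
    return tensor
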
def make_tensors(names, countries):
    sequences_and_lengths = [name2list(name) for name in names]  # turn every name into a list; name2list returns (the list, its length)
    name_sequences = [s1[0] for s1 in sequences_and_lengths]
    seq_lengths = torch.LongTensor([s1[1] for s1 in sequences_and_lengths])
    countries = countries.long()

    # build an all-zero tensor and copy each sequence over the zeros (zero = padding)
    seq_tensor = torch.zeros(len(name_sequences), seq_lengths.max()).long()
    for idx, (seq, seq_len) in enumerate(zip(name_sequences, seq_lengths), 0):
        seq_tensor[idx, :seq_len] = torch.LongTensor(seq)

    seq_lengths, perm_idx = seq_lengths.sort(dim=0, descending=True)  # sort by sequence length, descending (required by pack_padded_sequence)
    seq_tensor = seq_tensor[perm_idx]
    countries = countries[perm_idx]

    return create_tensor(seq_tensor), \
        create_tensor(seq_lengths), \
        create_tensor(countries)

# classifier, criterion, optimizer, start and time_since come from the lecture's surrounding script
def trainModel():
    total_loss = 0
    for i, (names, countries) in enumerate(trainloader, 1):
        inputs, seq_lengths, target = make_tensors(names, countries)
        output = classifier(inputs, seq_lengths)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        if i % 10 == 0:
            print(f'[{time_since(start)}] Epoch {epoch} ', end='')
            print(f'[{i * len(inputs)}/{len(trainset)}] ', end='')
            print(f'loss={total_loss / (i * len(inputs))}')
    return total_loss

def testModel():
    correct = 0
    total = len(testset)
    print("evaluating trained model ...")
    with torch.no_grad():
        for i, (names, countries) in enumerate(testloader, 1):
            inputs, seq_lengths, target = make_tensors(names, countries)
            output = classifier(inputs, seq_lengths)
            pred = output.max(dim=1, keepdim=True)[1]  # index of the max logit
            correct += pred.eq(target.view_as(pred)).sum().item()

        percent = '%.2f' % (100 * correct / total)
        print(f'Test set: accuracy {correct}/{total} {percent}%')

    return correct / total
