AutoEncoder

Implemented with PyTorch 1.0, GPU build.
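A quick optional check, added here (not in the original post), to confirm the interpreter sees the expected PyTorch build and a usable GPU:

import torch
print(torch.__version__)          # e.g. '1.0.0'
print(torch.cuda.is_available())  # True if a CUDA GPU is usable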

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
import numpy as np

torch.manual_seed(1)   # fix the random seed for reproducibility

EPOCH = 10        # number of passes over the training set
BATCH_SIZE = 64
LR = 0.005        # learning rate for Adam
N_TEST_IMG = 5    # number of images to visualize

train_data = torchvision.datasets.MNIST(
        root = 'D:/dataset/mnist/',
        train = True,
        transform = torchvision.transforms.ToTensor(),   # scales pixel values to [0, 1]
        download = True)

# Inspect one sample from the dataset
# (train_data / train_labels are the attribute names from the PyTorch 1.0 era;
#  newer torchvision versions rename them to .data / .targets)
print(train_data.train_data.size())      # torch.Size([60000, 28, 28])
print(train_data.train_labels.size())    # torch.Size([60000])
plt.imshow(train_data.train_data[2].numpy(), cmap='gray')
plt.title('{}'.format(train_data.train_labels[2]))
plt.show()

train_dataloader = Data.DataLoader(dataset = train_data,
                                   batch_size = BATCH_SIZE,
                                   shuffle = True,
                                   drop_last = True)
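A quick sanity check (not in the original post) pulls one batch from the loader to confirm the tensor shapes the training loop will receive:

x0, y0 = next(iter(train_dataloader))
print(x0.shape)   # torch.Size([64, 1, 28, 28]) -- image batch
print(y0.shape)   # torch.Size([64])            -- labels (unused by the autoencoder)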

class AutoEncoder(nn.Module):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        # Encoder: compress the 784-pixel image down to a 3-dimensional code
        self.encoder = nn.Sequential(nn.Linear(28*28, 128),
                                     nn.Tanh(),
                                     nn.Linear(128, 64),
                                     nn.Tanh(),
                                     nn.Linear(64, 12),
                                     nn.Tanh(),
                                     nn.Linear(12, 3))

        # Decoder: reconstruct the image from the 3-dimensional code;
        # Sigmoid keeps outputs in [0, 1], matching the normalized pixel values
        self.decoder = nn.Sequential(nn.Linear(3, 12),
                                     nn.Tanh(),
                                     nn.Linear(12, 64),
                                     nn.Tanh(),
                                     nn.Linear(64, 128),
                                     nn.Tanh(),
                                     nn.Linear(128, 28*28),
                                     nn.Sigmoid())

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded
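Before training, a dummy forward pass (an added check, not in the original post) verifies the shapes: the encoder maps each flattened 784-pixel image to a 3-dimensional code, and the decoder maps it back:

enc, dec = AutoEncoder()(torch.rand(2, 28*28))
print(enc.shape)   # torch.Size([2, 3])
print(dec.shape)   # torch.Size([2, 784])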

# Create the model on the GPU if one is available, then build the optimizer.
# Moving the model to the device *before* constructing the optimizer is the
# recommended order, so the optimizer references the on-device parameters.
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')

model = AutoEncoder().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
loss_func = nn.MSELoss()   # reconstruction loss: mean squared error per pixel

# Show the first N_TEST_IMG original images in the top row
f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2))   # returns the figure and the axes array
plt.ion()   # interactive mode, so the figure can be updated during training
view_data = train_data.train_data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor)/255.
for i in range(N_TEST_IMG):
    a[0][i].imshow(np.reshape(view_data.numpy()[i], (28,28)), cmap='gray')
    a[0][i].set_xticks(())
    a[0][i].set_yticks(())
    
# Training loop (the device setup was done above, before the optimizer was built)
    
for epoch in range(EPOCH):
    for step, (x, _) in enumerate(train_dataloader):   # labels are not needed for an autoencoder
        b_x = x.view(-1, 28*28).to(device)   # input: flattened 28x28 image
        b_y = x.view(-1, 28*28).to(device)   # target: the input itself (reconstruction)
        optimizer.zero_grad()
        encoded, decoded = model(b_x)
        loss = loss_func(decoded, b_y)
        loss.backward()
        optimizer.step()
        if step % 100 == 0:
            print('Epoch: {}   |   step: {}   |   train loss: {:.4f}'.format(epoch+1, step, loss.item()))
    # After each epoch, show the 5 decoded (reconstructed) images in the bottom row
    with torch.no_grad():   # no gradients are needed for visualization
        encoded_data, decoded_data = model(view_data.to(device))
    for i in range(N_TEST_IMG):
        a[1][i].clear()
        a[1][i].imshow(np.reshape(decoded_data.cpu().numpy()[i], (28, 28)), cmap='gray')
        a[1][i].set_xticks(())
        a[1][i].set_yticks(())
    plt.draw()
    plt.pause(0.1)

# Keep these outside the epoch loop: the blocking plt.show() would otherwise
# halt training after the first epoch
plt.ioff()
plt.show()
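Since the bottleneck is 3-dimensional, the learned codes can be plotted directly. The sketch below is an addition (not in the original post): it encodes the first 200 training images and scatters them in 3-D, colored by digit label.

from mpl_toolkits.mplot3d import Axes3D   # registers the 3-D projection

# Encode the first 200 training images into the 3-D latent space
vis_data = train_data.train_data[:200].view(-1, 28*28).type(torch.FloatTensor)/255.
with torch.no_grad():
    codes, _ = model(vis_data.to(device))
codes = codes.cpu().numpy()
labels = train_data.train_labels[:200].numpy()

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
sc = ax.scatter(codes[:, 0], codes[:, 1], codes[:, 2], c=labels, cmap='tab10')
fig.colorbar(sc, label='digit')
plt.show()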
