小刘总: Autoencoders

import torch
from torch import nn, optim
from torch.autograd import Variable
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn.functional as F
import os
import matplotlib.pyplot as plt


# Sketch of the interface: an autoencoder holds an encoder, a decoder and an
# embedding size (emdsize); forward() returns both the code and the reconstruction.
# Autograd supplies the backward pass, so no custom backward() is needed.
class autoencoder(nn.Module):
    def __init__(self):
        super(autoencoder, self).__init__()
        self.encoder = None   # filled in by the full implementation below
        self.decoder = None
        self.emdsize = None

    def forward(self, x):
        encode = self.encoder(x)
        decode = self.decoder(encode)
        return encode, decode



# Load the MNIST training set
def get_data():
    # ToTensor maps pixels to [0, 1]; Normalize([0.5], [0.5]) then maps them to [-1, 1]
    data_tf = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
    train_dataset = datasets.MNIST(root='./data', train=True, transform=data_tf, download=True)
    # batch_size is read from the enclosing (global) scope
    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, drop_last=True)
    return train_loader
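# Quick sanity check (a sketch, assuming batch_size has been set, since get_data()
# reads it from the global scope): pull one batch and confirm shapes and value range.
batch_size = 128
loader = get_data()
images, labels = next(iter(loader))
print(images.shape, labels.shape)                 # torch.Size([128, 1, 28, 28]) torch.Size([128])
print(images.min().item(), images.max().item())   # roughly -1.0 and 1.0 after normalization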

def to_img(x):
    # Map from [-1, 1] back to [0, 1] and reshape to a single 1x28x28 image
    x = (x + 1.) * 0.5
    x = x.clamp(0, 1)
    x = x.view(1, 1, 28, 28)
    return x

class autoencoder(nn.Module):
    def __init__(self, emdsize = 10):
        super(autoencoder, self).__init__()
        self.emdsize = emdsize
        self.encoder = nn.Sequential(nn.Linear(28*28, 128),
                                     nn.ReLU(True),
                                     nn.Linear(128, 64),
                                     nn.ReLU(True),
                                     nn.Linear(64, self.emdsize)
                                    )
        self.decoder = nn.Sequential(
                                     nn.Linear(self.emdsize, 64),
                                     nn.ReLU(True),
                                     nn.Linear(64, 128),
                                     nn.ReLU(True),
                                     nn.Linear(128, 28*28),
                                     nn.Tanh()
                                    )

    def forward(self, x):
        encode = self.encoder(x)
        decode = self.decoder(encode)
        return encode, decode
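# A minimal shape check (a sketch, not part of training): run a dummy batch of
# flattened 28x28 images through the model and confirm the code/output sizes.
ae = autoencoder(emdsize=10)
dummy = torch.randn(4, 28 * 28)
code, recon = ae(dummy)
print(code.shape)   # torch.Size([4, 10])
print(recon.shape)  # torch.Size([4, 784])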



# Train the fully connected autoencoder with a 10-dimensional embedding
batch_size = 128
lr = 1e-2
weight_decay = 1e-5
epoches = 10
model = autoencoder(emdsize=10)
train_data = get_data()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
for epoch in range(epoches):
    for img, _ in train_data:
        img = img.view(img.size(0), -1)   # flatten to (batch, 784)
        # forward
        _, output = model(img)
        loss = criterion(output, img)
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print("epoch=", epoch, loss.detach().numpy())

# torch.save(model, './autoencoder.pth')
# model = torch.load('./autoencoder.pth')
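# Optional rough check (a sketch; it reuses model, criterion and train_data from the
# block above, and the training loader rather than a held-out split): average the
# reconstruction loss over one pass, to compare embedding sizes with a single number.
total, n_batches = 0.0, 0
with torch.no_grad():
    for img, _ in train_data:
        img = img.view(img.size(0), -1)
        _, output = model(img)
        total += criterion(output, img).item()
        n_batches += 1
print("mean reconstruction MSE:", total / n_batches)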



# Show an original image and its reconstruction for one batch
for img, _ in train_data:
    orig_img = to_img(img[0]).squeeze()
    orig_img = orig_img.data.numpy() * 255
    plt.imshow(orig_img.astype('uint8'), cmap='gray')
    plt.show()

    img = img.view(img.size(0), -1)
    _, decode = model(img[0])

    decode_img = to_img(decode).squeeze()
    decode_img = decode_img.data.numpy() * 255
    plt.imshow(decode_img.astype('uint8'), cmap='gray')
    plt.show()
    break  # only visualize the first batch


# Retrain with a larger, 100-dimensional embedding for comparison
batch_size = 128
lr = 1e-2
weight_decay = 1e-5
epoches = 10
model = autoencoder(emdsize=100)
train_data = get_data()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
for epoch in range(epoches):
    for img, _ in train_data:
        img = img.view(img.size(0), -1)   # flatten to (batch, 784)
        # forward
        _, output = model(img)
        loss = criterion(output, img)
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print("epoch=", epoch, loss.detach().numpy())

# torch.save(model, './autoencoder.pth')
# model = torch.load('./autoencoder.pth')


# Show an original image and its reconstruction for one batch
for img, _ in train_data:
    orig_img = to_img(img[0]).squeeze()
    orig_img = orig_img.data.numpy() * 255
    plt.imshow(orig_img.astype('uint8'), cmap='gray')
    plt.show()

    img = img.view(img.size(0), -1)
    _, decode = model(img[0])

    decode_img = to_img(decode).squeeze()
    decode_img = decode_img.data.numpy() * 255
    plt.imshow(decode_img.astype('uint8'), cmap='gray')
    plt.show()
    break  # only visualize the first batch


# Retrain with a 1-dimensional embedding to see how much a single latent dimension captures
batch_size = 128
lr = 1e-2
weight_decay = 1e-5
epoches = 10
model = autoencoder(emdsize=1)
train_data = get_data()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
for epoch in range(epoches):
    for img, _ in train_data:
        img = img.view(img.size(0), -1)   # flatten to (batch, 784)
        # forward
        _, output = model(img)
        loss = criterion(output, img)
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print("epoch=", epoch, loss.detach().numpy())

# torch.save(model, './autoencoder.pth')
# model = torch.load('./autoencoder.pth')



# Show an original image and its reconstruction for one batch
for img, _ in train_data:
    orig_img = to_img(img[0]).squeeze()
    orig_img = orig_img.data.numpy() * 255
    plt.imshow(orig_img.astype('uint8'), cmap='gray')
    plt.show()

    img = img.view(img.size(0), -1)
    _, decode = model(img[0])

    decode_img = to_img(decode).squeeze()
    decode_img = decode_img.data.numpy() * 255
    plt.imshow(decode_img.astype('uint8'), cmap='gray')
    plt.show()
    break  # only visualize the first batch


class autoencoder_cnn(nn.Module):
    def __init__(self,emdsize = 10):
        super(autoencoder_cnn, self).__init__()
        self.emdsize = emdsize
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 64, 3, stride=2, padding=1),        # (b, 64, 14, 14)
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),                        # (b, 64, 7, 7)
            nn.Conv2d(64, emdsize, 3, stride=2, padding=1),   # (b, emdsize, 4, 4)
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),                        # (b, emdsize, 2, 2)
        )
        # The decoder roughly inverts the encoder: 2x2 code -> 28x28 image
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(emdsize, 64, 2, stride=2),         # (b, 64, 4, 4)
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, 3, stride=2, padding=1),   # (b, 64, 7, 7)
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 64, 3, stride=2),              # (b, 64, 15, 15)
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 1, 2, stride=2, padding=1),    # (b, 1, 28, 28)
            nn.Tanh()
        )
    def forward(self, x):
        encode = self.encoder(x)
        decode = self.decoder(encode)
        return encode, decode
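# Shape check for the conv autoencoder (a sketch, using emdsize=25 as in the
# training run below): the code should be (b, 25, 2, 2) and the output (b, 1, 28, 28).
cnn_ae = autoencoder_cnn(emdsize=25)
dummy = torch.randn(2, 1, 28, 28)
code, recon = cnn_ae(dummy)
print(code.shape)   # torch.Size([2, 25, 2, 2])
print(recon.shape)  # torch.Size([2, 1, 28, 28])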



# Train the convolutional autoencoder (a 25-channel, 2x2 code)
batch_size = 128
lr = 1e-2
weight_decay = 1e-5
epoches = 5
model = autoencoder_cnn(emdsize=25)
train_data = get_data()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
for epoch in range(epoches):
    for img, _ in train_data:
        # forward (no flattening: the conv net takes (b, 1, 28, 28) directly)
        _, output = model(img)
        loss = criterion(output, img)
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print("epoch=", epoch, loss.detach().numpy())



# Show an original image and its CNN reconstruction for one batch
for img, _ in train_data:
    orig_img = to_img(img[0]).squeeze()
    orig_img = orig_img.data.numpy() * 255
    plt.imshow(orig_img.astype('uint8'), cmap='gray')
    plt.show()

    _, decode = model(img)

    decode_img = to_img(decode[0]).squeeze()
    decode_img = decode_img.data.numpy() * 255
    plt.imshow(decode_img.astype('uint8'), cmap='gray')
    plt.show()
    break  # only visualize the first batch





# Feed random 25x2x2 codes straight into the decoder; the plain autoencoder is not
# trained to decode arbitrary codes, which is part of the motivation for the VAE below
import numpy as np
for i in range(10):
    random_img = np.random.rand(1, 25, 2, 2)
    random_img = torch.Tensor(random_img)
    decode_img = model.decoder(random_img)
    decode_img = to_img(decode_img).squeeze()
    decode_img = decode_img.data.numpy() * 255
    plt.imshow(decode_img.astype('uint8'), cmap='gray')
    plt.show()




# Variational autoencoder (VAE)
# Goal: any vector sampled from a Gaussian distribution should decode into a plausible image
# Each latent dimension learns a mean and a standard deviation
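# The key step is the reparameterization trick: instead of sampling z ~ N(mu, sigma^2)
# directly, sample eps ~ N(0, I) and compute z = mu + sigma * eps, which keeps mu and
# logvar differentiable. A minimal standalone sketch of the same formula used below:
mu = torch.zeros(1, 10)
logvar = torch.zeros(1, 10)              # logvar = 0  ->  sigma = 1
eps = torch.randn_like(mu)               # eps ~ N(0, I)
z = mu + eps * torch.exp(0.5 * logvar)   # z ~ N(mu, sigma^2)
print(z.shape)                           # torch.Size([1, 10])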


class VAE(nn.Module):
    def __init__(self, emdsize=10):
        super(VAE, self).__init__()
        self.emdsize = emdsize
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=4, stride=2, padding=1),   # (b, 64, 14, 14)
            nn.BatchNorm2d(64),
            nn.ReLU(True),

            nn.Conv2d(64, 64, kernel_size=4, stride=2, padding=1),  # (b, 64, 7, 7)
            nn.BatchNorm2d(64),
            nn.ReLU(True),

            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),  # (b, 64, 7, 7)
            nn.BatchNorm2d(64),
            nn.ReLU(True),
        )

        self.fc11 = nn.Linear(64 * 7 * 7, self.emdsize)  # mean of q(z|x)
        self.fc12 = nn.Linear(64 * 7 * 7, self.emdsize)  # log-variance of q(z|x)
        self.fc2 = nn.Linear(self.emdsize, 64 * 7 * 7)   # map z back to the feature-map size

        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(64, 64, kernel_size=4, stride=2, padding=1),  # (b, 64, 14, 14)
            nn.ReLU(inplace=True),

            nn.ConvTranspose2d(64, 1, kernel_size=4, stride=2, padding=1),   # (b, 1, 28, 28)
            nn.Tanh()
        )

    def reparameterize(self, mu, logvar):
        # z = mu + sigma * eps, eps ~ N(0, I); keeps the sampling step differentiable
        eps = torch.randn(mu.size())
        z = mu + eps * torch.exp(0.5 * logvar)
        return z

    def forward(self, x):
        out = self.encoder(x)
        mu = self.fc11(out.view(out.size(0), -1))
        logvar = self.fc12(out.view(out.size(0), -1))
        z = self.reparameterize(mu, logvar)
        out3 = self.fc2(z).view(z.size(0), 64, 7, 7)
        gen_img = self.decoder(out3)
        return gen_img, mu, logvar
 






def loss_func(recon_x, x, mu, logvar):
    # Reconstruction error, summed over all pixels (size_average is deprecated)
    loss = nn.MSELoss(reduction='sum')
    MSE = loss(recon_x, x)
    # Closed-form KL divergence between q(z|x) = N(mu, sigma^2) and the prior N(0, I)
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return MSE + KLD
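# Sanity check (a sketch with dummy tensors): for a perfect reconstruction and
# mu = 0, logvar = 0 (so q(z|x) matches the prior N(0, I)), the loss should be 0.
recon = torch.zeros(2, 1, 28, 28)
target = torch.zeros(2, 1, 28, 28)
mu = torch.zeros(2, 10)
logvar = torch.zeros(2, 10)
print(loss_func(recon, target, mu, logvar))   # tensor(0.)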






# Train the VAE
batch_size = 128
lr = 1e-3
epoches = 10
model = VAE(emdsize=10)
train_data = get_data()
optimizer = optim.Adam(model.parameters(), lr=lr)
for epoch in range(epoches):
    for img, _ in train_data:
        # forward
        output, mu, logvar = model(img)
        loss = loss_func(output, img, mu, logvar)
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print("epoch=", epoch, loss.detach().numpy() / batch_size)
        





# Sample latent vectors from N(0, I) and decode them into digits
for i in range(10):
    sample = torch.randn(1, 10)
    sample = model.decoder(model.fc2(sample).view(1, 64, 7, 7))
    decode_img = to_img(sample).squeeze()
    decode_img = decode_img.data.numpy() * 255
    plt.imshow(decode_img.astype('uint8'), cmap='gray')
    plt.show()
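# Alternatively (a sketch; './vae_samples' is just an example path), decode a batch of
# latent vectors and save them as one grid with save_image, already imported above.
os.makedirs('./vae_samples', exist_ok=True)
with torch.no_grad():
    z = torch.randn(64, 10)                                # 64 latent vectors, emdsize = 10
    gen = model.decoder(model.fc2(z).view(64, 64, 7, 7))   # (64, 1, 28, 28), values in [-1, 1]
    save_image((gen + 1.) * 0.5, './vae_samples/generated.png', nrow=8)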





Dataset path: /data/2020-GAN训练营-datasets
