李宏毅作业十 Generative Adversarial Network生成对抗网络(代码)
李宏毅作业八unsupervised无监督聚类学习
李宏毅作业七其三 Network Compression (Network Pruning)
李宏毅作业七其二 Network Compression (Knowledge Distillation)
李宏毅作业七其一 Network Compression (Architecuture Design)
李宏毅作业六 Adversarial Attack对抗攻击
本篇不涉及过多理论,半监督训练的异常检测其实就是在同时含有标注数据和未标注数据的训练集中学习模型。闲话少说,理论过完,看了代码基本就知道到底是怎么实现的了。
半监督的异常检测:即从同时含有标注数据和未标注数据的训练集中学习模型。在监督学习中,因为训练集全部已经标记了,所以我们的兴趣点通常是在未来测试数据上的性能。而在半监督学习的分类问题中,训练数据中包含未标记的数据。因此,存在两个不同的目标。一个是预测测试集数据的类别,另一个是预测训练样本中未标记实例的类别(这里我们都假设是分类问题)。
训练集没有杂质
测试集才会有异常数据
先训练一个简单的模型,其中训练集的类别标签是总训练集类别标签的一部分,其余类别标签是要侦测的异常标签。
测试集则包含所有数据,包含要检测的异常数据。
1为异常数据
0为正常数据
正确率用AUC计算
# Download the training/test arrays from Google Drive (Colab/Jupyter shell magic).
!gdown --id '1_zT3JOpvXFGr7mkxs3XJDeGxTn_8pItq' --output train.npy
!gdown --id '11Y_6JDjlhIY-M5-jW1rLRshDMqeKi9Kr' --output test.npy
import numpy as np
# NOTE(review): allow_pickle=True executes pickle when loading, which is unsafe
# on untrusted files; presumably needed because the .npy files hold object
# arrays — confirm before reusing with other data sources.
train = np.load('train.npy', allow_pickle=True)
test = np.load('test.npy', allow_pickle=True)
K-means: 假设训练集的类别数较少(<20),但具体数目未知,所以我们猜测类别数为 n,也就是说假设训练集有 n 类。先用 K-means 计算训练集的 n 个中心,然后用这 n 个中心对训练数据分群。此时,inlier data(正常数据)到 centroid(群中心)的距离比 outlier(异常数据)到群中心的距离短。
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import f1_score, pairwise_distances, roc_auc_score
# f1_score(y_true, y_pred[, labels, ...])
# The F1 score is the harmonic (weighted) mean of precision and recall:
# best value 1, worst value 0; precision and recall contribute equally.
# F1 = 2 * (precision * recall) / (precision + recall)
# pairwise_distances
# computes the distances between the samples in X and Y
from scipy.cluster.vq import vq, kmeans
# scipy.cluster is scipy's clustering package; it contains two families:
# 1. vector quantization (scipy.cluster.vq): supports vq and k-means clustering
if task == 'knn':
    # Flatten every image into one feature vector: (N, H, W, C) -> (N, H*W*C).
    x = train.reshape(len(train), -1)
    y = test.reshape(len(test), -1)
    scores = list()
    # The true number of classes is unknown, so sweep candidate cluster counts.
    for n in range(1, 10):
        # Fit mini-batch K-means on the (assumed clean) training set.
        kmeans_x = MiniBatchKMeans(n_clusters=n, batch_size=100).fit(x)
        # Assign each test sample to its nearest learned centroid.
        y_cluster = kmeans_x.predict(y)
        # Squared Euclidean distance to the assigned centroid; outliers are
        # expected to lie farther from every centroid than inliers.
        y_dist = np.sum(np.square(kmeans_x.cluster_centers_[y_cluster] - y), axis=1)
        # Use the distance itself as a continuous anomaly score.
        y_pred = y_dist
        # BUG FIX: the original also called f1_score(y_label, y_pred, ...),
        # which raises ValueError ("continuous is not supported") for a
        # continuous score; its result was dead code anyway since it was
        # immediately overwritten by the AUC below.
        score = roc_auc_score(y_label, y_pred, average='micro')
        scores.append(score)
    # Report the best AUC and the (0-based) index of the best cluster count.
    print(np.max(scores), np.argmax(scores))
    print(scores)
    print('auc score: {}'.format(np.max(scores)))
PCA:计算训练数据的主成分(principal component),将测试数据投影到这些主成分(component)上,再把投影向量重建回原来的空间向量。对重建的图片和原图计算 MSE,正常数据的误差比异常数据的误差要小。
from sklearn.decomposition import PCA

if task == 'pca':
    # Flatten each image into a single feature vector.
    flat_train = train.reshape(len(train), -1)
    flat_test = test.reshape(len(test), -1)
    # Fit the principal components on the clean training set only.
    projector = PCA(n_components=2).fit(flat_train)
    # Project test data onto the components, then map the projection back
    # into the original space; inliers reconstruct better than outliers.
    y_projected = projector.transform(flat_test)
    reconstruction = projector.inverse_transform(y_projected)
    # Per-sample L2 reconstruction error serves as the anomaly score.
    squared_err = np.square(reconstruction - flat_test).reshape(len(flat_test), -1)
    y_pred = np.sqrt(squared_err.sum(axis=1))
    # score = roc_auc_score(y_label, y_pred, average='micro')
    # score = f1_score(y_label, y_pred, average='micro')
    # print('auc score: {}'.format(score))
import torch
from torch import nn
import torch.nn.functional as F
#自动编码解码器
class fcn_autoencoder(nn.Module):
    """Fully-connected autoencoder for 32x32x3 images flattened to 3072-dim vectors."""

    def __init__(self):
        super(fcn_autoencoder, self).__init__()
        # Encoder: 3072 -> 128 -> 64 -> 12 -> 3 bottleneck.
        self.encoder = nn.Sequential(
            nn.Linear(32 * 32 * 3, 128),
            nn.ReLU(True),
            nn.Linear(128, 64),
            nn.ReLU(True),
            nn.Linear(64, 12),
            nn.ReLU(True),
            nn.Linear(12, 3),
        )
        # Decoder mirrors the encoder; Tanh bounds outputs to [-1, 1].
        self.decoder = nn.Sequential(
            nn.Linear(3, 12),
            nn.ReLU(True),
            nn.Linear(12, 64),
            nn.ReLU(True),
            nn.Linear(64, 128),
            nn.ReLU(True),
            nn.Linear(128, 32 * 32 * 3),
            nn.Tanh(),
        )

    def forward(self, x):
        # Compress, then reconstruct.
        return self.decoder(self.encoder(x))
class conv_autoencoder(nn.Module):
    """Convolutional autoencoder operating on (batch, 3, 32, 32) images."""

    def __init__(self):
        super(conv_autoencoder, self).__init__()
        # Each stride-2 conv halves the spatial size:
        # (3, 32, 32) -> (12, 16, 16) -> (24, 8, 8) -> (48, 4, 4).
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 12, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(12, 24, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(24, 48, 4, stride=2, padding=1),
            nn.ReLU(),
        )
        # Transposed convs mirror the encoder back up to (3, 32, 32);
        # Tanh bounds the reconstruction to [-1, 1].
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(48, 24, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(24, 12, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(12, 3, 4, stride=2, padding=1),
            nn.Tanh(),
        )

    def forward(self, x):
        # Encode to the bottleneck feature map, then decode back to an image.
        return self.decoder(self.encoder(x))
class VAE(nn.Module):
    """Variational autoencoder over flattened 32x32x3 images (3072-dim input, 20-dim latent)."""

    def __init__(self):
        super(VAE, self).__init__()
        self.fc1 = nn.Linear(32 * 32 * 3, 400)   # shared encoder hidden layer
        self.fc21 = nn.Linear(400, 20)           # latent mean head
        self.fc22 = nn.Linear(400, 20)           # latent log-variance head
        self.fc3 = nn.Linear(20, 400)            # decoder hidden layer
        self.fc4 = nn.Linear(400, 32 * 32 * 3)   # reconstruction head

    def encode(self, x):
        """Map input to (mu, logvar) of the approximate posterior."""
        h1 = F.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def reparametrize(self, mu, logvar):
        """Sample z = mu + sigma * eps, eps ~ N(0, I) (reparameterization trick)."""
        std = torch.exp(0.5 * logvar)
        # randn_like allocates the noise on std's device/dtype, replacing the
        # deprecated Variable / torch.cuda.FloatTensor branching of the original.
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        """Decode a latent sample; sigmoid keeps the reconstruction in (0, 1)."""
        h3 = F.relu(self.fc3(z))
        # torch.sigmoid replaces the deprecated F.sigmoid.
        return torch.sigmoid(self.fc4(h3))

    def forward(self, x):
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        return self.decode(z), mu, logvar
def loss_vae(recon_x, x, mu, logvar, criterion):
    """VAE loss: reconstruction error plus KL divergence to the unit Gaussian.

    recon_x: reconstructed (generated) images
    x: original images
    mu: latent mean
    logvar: latent log-variance
    criterion: reconstruction loss module (e.g. nn.MSELoss)
    """
    reconstruction_loss = criterion(recon_x, x)
    # KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    kl_divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return reconstruction_loss + kl_divergence
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.optim import Adam, AdamW
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
# Autoencoder training loop: fits the selected model on the training set and
# checkpoints the weights whenever a new lowest batch loss is seen.
if task == 'ae':
num_epochs = 1000
batch_size = 128
learning_rate = 1e-3
# {'fcn', 'cnn', 'vae'} -- selects one of the model classes defined above.
model_type = 'cnn'
x = train
# FCN and VAE consume flat vectors; flatten (N, H, W, C) -> (N, H*W*C).
if model_type == 'fcn' or model_type == 'vae':
x = x.reshape(len(x), -1)
data = torch.tensor(x, dtype=torch.float)
train_dataset = TensorDataset(data)
# Random sampling shuffles batches each epoch.
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size)
# NOTE(review): all three models are instantiated eagerly here; only the
# selected one is used.
model_classes = {'fcn':fcn_autoencoder(), 'cnn':conv_autoencoder(), 'vae':VAE()}
model = model_classes[model_type].cuda()
criterion = nn.MSELoss()
optimizer = torch.optim.AdamW(
model.parameters(), lr=learning_rate)
best_loss = np.inf
model.train()
for epoch in range(num_epochs):
for data in train_dataloader:
if model_type == 'cnn':
# NOTE(review): transpose(3, 1) maps (N, H, W, C) -> (N, C, W, H),
# which also swaps H and W; harmless for square 32x32 inputs (and
# applied identically at test time), but permute(0, 3, 1, 2) would
# be the exact NHWC -> NCHW conversion.
img = data[0].transpose(3, 1).cuda()
else:
img = data[0].cuda()
# ===================forward=====================
output = model(img)
if model_type == 'vae':
# VAE forward returns (reconstruction, mu, logvar).
loss = loss_vae(output[0], img, output[1], output[2], criterion)
else:
loss = criterion(output, img)
# ===================backward====================
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ===================save====================
# NOTE(review): best_loss tracks a single batch's loss, not an epoch
# average, so the checkpoint is the model right after its luckiest
# batch; torch.save(model, ...) also pickles the full module object.
if loss.item() < best_loss:
best_loss = loss.item()
torch.save(model, 'best_model_{}.pt'.format(model_type))
# ===================log========================
print('epoch [{}/{}], loss:{:.4f}'
.format(epoch + 1, num_epochs, loss.item()))
将测试的图片载入模型后,可以看到重建后的图片,对二者取平方差。可以得出正常数据的平方差和异常数据的平方差形成明显的两类。
if task == 'ae':
    # Flatten for the fully-connected / VAE models; CNN keeps the image shape.
    if model_type == 'fcn' or model_type == 'vae':
        y = test.reshape(len(test), -1)
    else:
        y = test
    data = torch.tensor(y, dtype=torch.float)
    test_dataset = TensorDataset(data)
    # Sequential order keeps predictions aligned with test-set indices.
    test_sampler = SequentialSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=batch_size)
    # Reload the checkpoint with the lowest training loss.
    model = torch.load('best_model_{}.pt'.format(model_type), map_location='cuda')
    model.eval()
    reconstructed = list()
    for i, data in enumerate(test_dataloader):
        if model_type == 'cnn':
            # Same transpose(3, 1) layout conversion used during training.
            img = data[0].transpose(3, 1).cuda()
        else:
            img = data[0].cuda()
        output = model(img)
        if model_type == 'cnn':
            # Back to the original NHWC layout for comparison with y.
            output = output.transpose(3, 1)
        elif model_type == 'vae':
            # VAE forward returns (reconstruction, mu, logvar).
            output = output[0]
        reconstructed.append(output.cpu().detach().numpy())
    reconstructed = np.concatenate(reconstructed, axis=0)
    # Per-sample L2 reconstruction error = anomaly score: normal data should
    # reconstruct with clearly smaller error than anomalies.
    anomality = np.sqrt(np.sum(np.square(reconstructed - y).reshape(len(y), -1), axis=1))
    y_pred = anomality
    with open('prediction.csv', 'w') as f:
        f.write('id,anomaly\n')
        for i in range(len(y_pred)):
            f.write('{},{}\n'.format(i + 1, y_pred[i]))
    # BUG FIX: the original computed the AUC and then overwrote it with
    # f1_score(y_label, y_pred, ...), which raises ValueError for a continuous
    # score ("continuous is not supported") — and would have printed a
    # non-AUC value under the 'auc score' label. Report the AUC only.
    score = roc_auc_score(y_label, y_pred, average='micro')
    print('auc score: {}'.format(score))
该内容暂时用不到,所以只是过一下大致内容;代码和注释中若有不对的地方,还请大佬们多多指教。