Contents
Principle
Code reproduction:
Data download link:
Product Layer code:
PNN code:
Complete project:
References:
Principle

At first glance, the architecture figure in the paper looks a lot like DeepCrossing: multiple feature fields as input, an embedding layer, a stack of fully connected layers, and CTR as the final prediction target. The one difference is also easy to spot: PNN replaces DeepCrossing's Stacking layer with a Product layer. Stacking merely concatenates the embedding vectors, while the Product layer appears to perform pairwise interactions between them, and explicit feature crossing is a common way to improve such models. The figure alone does not reveal what the Product layer actually computes, though; that is only spelled out in the paper itself, which does explain it in detail. I also found a post by another author that supplements the paper with a complete model-structure diagram:
推荐系统(九)PNN模型(Product-based Neural Networks) (天泽28's blog, CSDN): https://blog.csdn.net/u012328159/article/details/122910791
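For reference, and if I am reading the paper correctly, the first hidden layer is built from two signals, a linear part l_z and a product part l_p (n indexes the D_1 output neurons):

$$l_1 = \mathrm{relu}(l_z + l_p + b_1), \quad l_z^n = W_z^n \odot z, \quad l_p^n = W_p^n \odot p, \quad A \odot B \triangleq \sum_{i,j} A_{i,j} B_{i,j}$$

where p collects the pairwise products p_{i,j} = g(f_i, f_j), inner or outer depending on the variant (IPNN vs. OPNN). This maps onto the ProductLayer code below: w_z plays W_z, w_p plays W_p, and l_b plays b_1.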
Combining that author's diagrams with the original paper's description makes it easy to work out how each part is computed; you can also just read the summary in the link above. For the inner- and outer-product operations themselves, see the link below (standing on a predecessor's shoulders here; re-copying the same content from post to post is pointless):
矩阵乘法核心思想(5):内积与外积 (知乎, zhihu.com): https://zhuanlan.zhihu.com/p/350470257
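As a toy illustration of the two operations (my own sketch, not from either post): for two embedding vectors, the inner product collapses them to a scalar, while the outer product expands them into a matrix.

import torch

a = torch.tensor([1., 2., 3.])   # embedding of feature i (embed_dim = 3)
b = torch.tensor([4., 5., 6.])   # embedding of feature j

inner = torch.dot(a, b)     # scalar: 1*4 + 2*5 + 3*6 = 32
outer = torch.outer(a, b)   # [3, 3] matrix, outer[i][j] = a[i] * b[j]
print(inner)                # tensor(32.)
print(outer.shape)          # torch.Size([3, 3])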
Code reproduction:

Data download link:

criteo数据用于推荐系统学习 (CSDN download): https://download.csdn.net/download/Big_Huang/85155340

As for the reproduction itself: understanding the theory is one thing, but writing the code is quite another, so I also found the reproduction in this post:
AI上推荐 之 NeuralCF与PNN模型(改变特征交叉方式) (翻滚的小@强's blog, CSDN): https://zhongqiang.blog.csdn.net/article/details/108985457

I added some comments reflecting my own understanding on top of that code, but the more involved matrix computations are still hard to grasp intuitively just by reading. My suggestion is to single-step through the code in a debugger, inspect the shape of every intermediate matrix, and match each step of the computation back to the corresponding part of the paper.
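If you don't want to fire up a debugger, a few throwaway lines like the following work too (my own sketch; it assumes the ProductLayer class defined in the next section, and the sizes are made up):

import torch

batch, feature_nums, embed_dim = 2, 26, 10
sparse_embeds = torch.rand(batch, feature_nums, embed_dim)
layer = ProductLayer(mode='in', embed_dim=embed_dim, feature_nums=feature_nums, hidden_units=[256])
out = layer(sparse_embeds, sparse_embeds)  # in PNN.forward, z and sparse_embeds are the same tensor
print(out.shape)  # expected: torch.Size([2, 256])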
Product Layer code:

class ProductLayer(nn.Module):
    def __init__(self, mode, embed_dim, feature_nums, hidden_units):
        # For a detailed walkthrough of the math, see: https://blog.csdn.net/u012328159/article/details/122910791
        super(ProductLayer, self).__init__()
        self.mode = mode
        # w_z: weights for the linear signal l_z
        self.w_z = nn.Parameter(torch.rand([feature_nums, embed_dim, hidden_units[0]]))
        # p part: two variants, inner product and outer product
        if mode == 'in':
            self.w_p = nn.Parameter(torch.rand([feature_nums, feature_nums, hidden_units[0]]))
        else:
            self.w_p = nn.Parameter(torch.rand([embed_dim, embed_dim, hidden_units[0]]))
        self.l_b = nn.Parameter(torch.rand([hidden_units[0], ]))
    def forward(self, z, sparse_embeds):
        # l_z part
        l_z = torch.mm(z.reshape(z.shape[0], -1), self.w_z.permute((2, 0, 1)).reshape(self.w_z.shape[2], -1).T)  # (None, hidden_units[0])
        # l_p part
        if self.mode == 'in':
            # Inner product: p holds the pairwise inner products of the feature embeddings,
            # so the result matrix is [feature_nums, feature_nums]
            # [batch, feature_nums, embed_dim] * [batch, embed_dim, feature_nums] = [batch, feature_nums, feature_nums]
            p = torch.matmul(sparse_embeds, sparse_embeds.permute((0, 2, 1)))
        else:
            # Outer product
            # The naive outer product takes every pair of feature embeddings, producing n*n matrices of shape
            # [embed_dim, embed_dim], then sums them element-wise into a single [embed_dim, embed_dim] matrix.
            # Illustration: https://zhuanlan.zhihu.com/p/350470257
            # That computation is very expensive, so the paper's authors reduce it: first sum sparse_embeds
            # along the feature_nums axis, then take a single outer product.
            # After summing along feature_nums, the embedding shrinks from [feature_nums, embed_dim] to [1, embed_dim],
            # so the final outer product is a single [embed_dim, embed_dim] matrix.
            f_sum = torch.unsqueeze(torch.sum(sparse_embeds, dim=1), dim=1)  # [None, 1, embed_dim]
            p = torch.matmul(f_sum.permute((0, 2, 1)), f_sum)  # [None, embed_dim, embed_dim]
        l_p = torch.mm(p.reshape(p.shape[0], -1), self.w_p.permute((2, 0, 1)).reshape(self.w_p.shape[2], -1).T)  # [None, hidden_units[0]]
        output = l_p + l_z + self.l_b
        return output
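The dimensionality-reduction step in the `else` branch deserves a sanity check. By bilinearity, summing all n*n pairwise outer products is exactly the same as taking one outer product of the summed embeddings; the sketch below (my own, with arbitrary sizes) verifies this numerically:

import torch

feature_nums, embed_dim = 5, 4
embeds = torch.rand(feature_nums, embed_dim)

# Naive version: n*n pairwise outer products, summed element-wise
naive = torch.zeros(embed_dim, embed_dim)
for i in range(feature_nums):
    for j in range(feature_nums):
        naive += torch.outer(embeds[i], embeds[j])

# The paper's reduced version: sum over the feature axis first, then one outer product
f_sum = embeds.sum(dim=0)
reduced = torch.outer(f_sum, f_sum)

print(torch.allclose(naive, reduced))  # True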
PNN code:

class PNN(nn.Module):
    def __init__(self, features_info, hidden_unit, embedding_dim=10, output_dim=1, mode='in'):
        super().__init__()
        # Unpack the feature information
        self.dense_features, self.sparse_features, self.sparse_features_nunique = features_info
        self.__dense_features_nums = len(self.dense_features)
        self.__sparse_features_nums = len(self.sparse_features)
        # Build the embedding layers
        self.embeds_layers = nn.ModuleDict({
            "embed_" + str(key): nn.Embedding(num_embeddings=fea_num, embedding_dim=embedding_dim)
            for key, fea_num in self.sparse_features_nunique.items()
        })
        # Build the product layer
        self.productLayer = ProductLayer(mode, embedding_dim, self.__sparse_features_nums, hidden_unit)
        # Fully connected layer L1
        self.L1 = nn.Linear(hidden_unit[0] + self.__dense_features_nums, hidden_unit[1])
        # Fully connected layer L2
        self.L2 = nn.Linear(hidden_unit[1], output_dim)
    def forward(self, x):
        # Split x into dense_inputs and sparse_inputs
        dense_inputs, sparse_inputs = x[:, :self.__dense_features_nums], x[:, self.__dense_features_nums:]
        sparse_inputs = sparse_inputs.long()
        # Embed the sparse features
        sparse_embeds = [self.embeds_layers["embed_" + key](sparse_inputs[:, idx]) for idx, key in enumerate(self.sparse_features)]
        sparse_embeds = torch.stack(sparse_embeds)
        sparse_embeds = sparse_embeds.permute((1, 0, 2))  # [batch, feature_nums, embed_dim]
        # Product layer
        z = sparse_embeds
        product_out = self.productLayer(z, sparse_embeds)
        product_out = torch.cat([product_out, dense_inputs], dim=-1)
        # L1 layer
        out = F.relu(self.L1(product_out))
        # L2 layer
        out = self.L2(out)
        return torch.sigmoid(out)  # F.sigmoid is deprecated; torch.sigmoid is equivalent
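Before wiring in the real data, a smoke test with fabricated feature metadata (my own addition; the names and sizes are arbitrary, but the layout matches the [dense_features, sparse_features, sparse_features_nunique] list that getCriteo() returns below) confirms the shapes line up:

import torch

dense_features = ['I1', 'I2']
sparse_features = ['C1', 'C2', 'C3']
sparse_features_nunique = {'C1': 10, 'C2': 20, 'C3': 30}
features_info = [dense_features, sparse_features, sparse_features_nunique]

model = PNN(features_info, hidden_unit=[256, 128], embedding_dim=10, mode='in')
x = torch.cat([torch.rand(4, 2),                       # 2 dense columns
               torch.randint(0, 10, (4, 3)).float()],  # 3 sparse columns (ids < smallest nunique)
              dim=1)
print(model(x).shape)  # torch.Size([4, 1])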
Apart from the model structure, everything else is carried over directly from the Deep Crossing reproduction. The only thing to watch out for is the learning rate: it needs to be fairly small, otherwise the model struggles to converge.

Complete project:
import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
# ProductLayer and PNN are exactly the classes defined above; paste them in here to run this file standalone.
def getCriteo(data_path='./criteo/train.csv'):
    df_data = pd.read_csv(data_path, sep=',')
    df_data.drop(['Id'], axis=1, inplace=True)
    dense_features = ['I' + str(i + 1) for i in range(13)]
    sparse_features = ['C' + str(i + 1) for i in range(26)]
    # Fill missing values
    df_data[sparse_features] = df_data[sparse_features].fillna('-1')
    df_data[dense_features] = df_data[dense_features].fillna(0)
    # LabelEncoder-encode the categorical features
    for feature in sparse_features:
        df_data[feature] = LabelEncoder().fit_transform(df_data[feature])
    # Min-max normalize the numerical features
    df_data[dense_features] = MinMaxScaler().fit_transform(df_data[dense_features])
    label = df_data.pop('Label')
    # Count the distinct values of each sparse feature (needed to size the embeddings)
    sparse_features_nunique = {}
    for fea in sparse_features:
        sparse_features_nunique[fea] = df_data[fea].nunique()
    features_info = [dense_features, sparse_features, sparse_features_nunique]
    return df_data, label, features_info
class TrainTask:
    def __init__(self, model, lr=0.0001, use_cuda=False):
        self.__device = torch.device("cuda" if torch.cuda.is_available() and use_cuda else "cpu")
        self.__model = model.to(self.__device)
        self.__loss_fn = nn.BCELoss().to(self.__device)
        self.__optimizer = torch.optim.Adam(self.__model.parameters(), lr=lr)
        self.train_loss = []
        self.eval_loss = []
        self.train_metric = []
        self.eval_metric = []

    def __train_one_batch(self, feas, labels):
        """Train on a single batch"""
        self.__optimizer.zero_grad()
        # 1. forward pass
        outputs = self.__model(feas)
        # 2. compute the loss
        loss = self.__loss_fn(outputs.squeeze(), labels)
        # 3. backpropagate and update
        loss.backward()
        self.__optimizer.step()
        return loss.item(), outputs
    def __train_one_epoch(self, train_dataloader, epoch_id):
        """Train for one epoch"""
        self.__model.train()
        loss_sum = 0
        batch_id = 0
        for batch_id, (feas, labels) in enumerate(train_dataloader):
            feas, labels = feas.to(self.__device), labels.to(self.__device)
            loss, outputs = self.__train_one_batch(feas, labels)
            loss_sum += loss
        self.train_loss.append(loss_sum / (batch_id + 1))
        print("Training Epoch: %d, mean loss: %.5f" % (epoch_id, loss_sum / (batch_id + 1)))
    def train(self, train_dataset, eval_dataset, epochs, batch_size):
        # Build the DataLoaders
        train_data_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
        eval_data_loader = DataLoader(dataset=eval_dataset, batch_size=batch_size, shuffle=True)
        for epoch in range(epochs):
            print('-' * 20 + ' Epoch {} starts '.format(epoch) + '-' * 20)
            # Train for one epoch
            self.__train_one_epoch(train_data_loader, epoch_id=epoch)
            # Run a validation pass
            self.__eval(eval_data_loader, epoch_id=epoch)
    def __eval(self, eval_dataloader, epoch_id):
        """Run inference over the validation set"""
        batch_id = 0
        loss_sum = 0
        self.__model.eval()
        for batch_id, (feas, labels) in enumerate(eval_dataloader):
            with torch.no_grad():
                feas, labels = feas.to(self.__device), labels.to(self.__device)
                # 1. forward pass
                outputs = self.__model(feas)
                # 2. compute the loss
                loss = self.__loss_fn(outputs.view(-1), labels)
                loss_sum += loss.item()
        self.eval_loss.append(loss_sum / (batch_id + 1))
        print("Evaluate Epoch: %d, mean loss: %.5f" % (epoch_id, loss_sum / (batch_id + 1)))
    def __plot_metric(self, train_metrics, val_metrics, metric_name):
        """Plot training/validation curves for one metric"""
        epochs = range(1, len(train_metrics) + 1)
        plt.plot(epochs, train_metrics, 'bo--')
        plt.plot(epochs, val_metrics, 'ro-')
        plt.title('Training and validation ' + metric_name)
        plt.xlabel("Epochs")
        plt.ylabel(metric_name)
        plt.legend(["train_" + metric_name, 'val_' + metric_name])
        plt.show()

    def plot_loss_curve(self):
        self.__plot_metric(self.train_loss, self.eval_loss, "Loss")
if __name__ == "__main__":
    df_data, label, features_info = getCriteo()
    # Split the data, then build the datasets and data pipelines
    x_train, x_val, y_train, y_val = train_test_split(df_data, label, test_size=0.2, random_state=2022)
    train_dataset = TensorDataset(torch.tensor(x_train.values).float(), torch.tensor(y_train.values).float())
    val_dataset = TensorDataset(torch.tensor(x_val.values).float(), torch.tensor(y_val.values).float())
    # Build the model
    model = PNN(features_info, hidden_unit=[256, 128])
    task = TrainTask(model, lr=0.0001, use_cuda=True)
    task.train(train_dataset, val_dataset, epochs=50, batch_size=16)
    task.plot_loss_curve()
A quick run shows the results are reasonable.
References:

1. 《深度学习推荐系统》 (Deep Learning Recommender System)
2. https://datawhalechina.github.io/fun-rec/