隐语义模型LFM梯度下降算法简单实现

import numpy as np
import pandas as pd
# Rating matrix R: rows are users, columns are items.
# A value of 0 marks a missing (unobserved) rating.
R = np.array([
    [4, 0, 2, 0, 1],
    [0, 2, 3, 0, 0],
    [1, 0, 2, 4, 0],
    [5, 0, 0, 3, 1],
    [0, 0, 1, 5, 1],
    [0, 3, 2, 4, 1],
])
 
def LFM_grad_desc(R, K=2, max_iter=1000, alpha=0.0001, lamda=0.001):
    """Factorize the rating matrix R into user/item latent factors by SGD.

    Minimizes, over the *observed* entries only (R[u][i] > 0):

        sum_{u,i} (P_u . Q_i - R_ui)^2
        + lamda * (||P_u||^2 + ||Q_i||^2)

    Args:
        R: M x N rating matrix; entries equal to 0 are treated as missing.
        K: number of latent factors.
        max_iter: maximum number of SGD passes over the observed ratings.
        alpha: learning rate.
        lamda: L2 regularization strength.

    Returns:
        (P, Q, cost): P is M x K user factors, Q is N x K item factors,
        cost is the final regularized squared error over observed entries.
    """
    M = len(R)
    N = len(R[0])

    # Random initialization of the latent factors.
    # Q is kept transposed (K x N) internally so that the prediction for
    # (u, i) is np.dot(P[u, :], Q[:, i]).
    P = np.random.rand(M, K)
    Q = np.random.rand(N, K).T

    cost = 0.0  # defined up-front so the return is valid even if max_iter == 0
    for step in range(max_iter):
        # One SGD pass: visit every observed rating and take a gradient
        # step on the corresponding user and item factor vectors.
        for u in range(M):
            for i in range(N):
                if R[u][i] > 0:
                    # Prediction error for this observed rating.
                    eui = np.dot(P[u, :], Q[:, i]) - R[u][i]

                    for k in range(K):
                        # Snapshot the pre-update values: both gradient
                        # components must use the parameters from *before*
                        # this step (the original code updated P first and
                        # then fed the new P into Q's gradient).
                        p_uk = P[u][k]
                        q_ki = Q[k][i]
                        P[u][k] -= alpha * (2 * eui * q_ki + 2 * lamda * p_uk)
                        Q[k][i] -= alpha * (2 * eui * p_uk + 2 * lamda * q_ki)

        # Regularized squared error over the observed entries.
        cost = 0.0
        for u in range(M):
            for i in range(N):
                if R[u][i] > 0:
                    cost += (np.dot(P[u, :], Q[:, i]) - R[u][i]) ** 2
                    for k in range(K):
                        cost += lamda * (P[u][k] ** 2 + Q[k][i] ** 2)

        # Early stopping once the loss is essentially zero.
        if cost < 0.0001:
            break
    return P, Q.T, cost

       

测试,这里选择的是jupyter notebook

# Set hyperparameters for the factorization (notebook-style driver;
# `R` and `LFM_grad_desc` are defined earlier in the file).
K = 5              # number of latent factors
max_iter = 5000    # maximum SGD passes
alpha = 0.0002     # learning rate
lamda = 0.004      # L2 regularization strength
P, Q, cost = LFM_grad_desc(R, K, max_iter, alpha, lamda)
print(P)
print(Q)
print(cost)
predR = P.dot(Q.T)  # reconstructed (predicted) rating matrix, including missing entries
predR  # bare expression: rendered as output by the Jupyter notebook

结果展示

隐语义模型LFM梯度下降算法简单实现_第1张图片

你可能感兴趣的:(隐语义模型LFM梯度下降算法简单实现)