FM（因子分解机，Factorization Machine）模型的 PyTorch 实现示例

import torch
import torch.nn as nn

# 定义因子分解机(Factorization Machine)模型类
class FM(nn.Module):
    """Factorization Machine (Rendle, 2010) for binary classification.

    Models y = sigmoid(w·x + b + 0.5 * sum_f[(x·V)_f^2 - (x^2·V^2)_f]),
    i.e. a linear term plus all pairwise feature interactions factorized
    through a rank-k embedding matrix V.
    """

    def __init__(self, dim, k):
        """
        Args:
            dim: number of input features.
            k: rank of the factorized interaction matrix (factors per feature).
        """
        super(FM, self).__init__()
        self.dim = dim  # feature dimension
        self.k = k  # number of latent factors
        self.w = nn.Linear(self.dim, 1, bias=True)  # first-order (linear) weights + global bias
        # Second-order factor matrix V, small random init to keep initial interactions near zero.
        self.v = nn.Parameter(torch.rand(self.dim, self.k) / 100)

    def forward(self, x):
        """Return per-sample probabilities of shape (batch, 1) for input x of shape (batch, dim)."""
        linear = self.w(x)  # (batch, 1) first-order term
        # Pairwise-interaction identity: sum_{i<j} <v_i, v_j> x_i x_j
        #   = 0.5 * sum_f [ (sum_i v_if x_i)^2 - sum_i v_if^2 x_i^2 ]
        square_of_sum = torch.pow(torch.mm(x, self.v), 2)          # (batch, k)
        sum_of_square = torch.mm(torch.pow(x, 2), torch.pow(self.v, 2))  # (batch, k)
        # BUG FIX: reduce over the factor dimension only (dim=1, keepdim=True).
        # The original summed over every dimension including the batch, yielding a
        # single scalar that was broadcast to — and coupled — all samples in the batch.
        quadratic = 0.5 * torch.sum(square_of_sum - sum_of_square, dim=1, keepdim=True)  # (batch, 1)
        return torch.sigmoid(linear + quadratic)  # probabilities in (0, 1)


# 训练代码
def train(model, train_loader, optimizer, criterion, epochs):
    model.train()  # 设置模型为训练模式

    for epoch in range(epochs):
        running_loss = 0.0

        for inputs, labels in train_loader:
            optimizer.zero_grad()  # 清零梯度

            outputs = model(inputs)  # 前向传播计算输出
            loss = criterion(outputs, labels)  # 计算损失函数

            loss.backward()  # 反向传播计算梯度
            optimizer.step()  # 更新模型参数

            running_loss += loss.item()

        epoch_loss = running_loss / len(train_loader)
        print(f"Epoch [{epoch + 1}/{epochs}], Loss: {epoch_loss:.4f}")


# Evaluation loop
def test(model, test_loader):
    """Evaluate binary-classification accuracy on test_loader and print it.

    Predictions are model outputs rounded to 0/1; labels are expected to be
    0.0/1.0 floats of the same shape as the outputs.
    """
    model.eval()  # disable training-mode behavior
    hits = 0
    seen = 0

    with torch.no_grad():  # no gradient tracking during evaluation
        for batch_x, batch_y in test_loader:
            preds = torch.round(model(batch_x))  # threshold probabilities at 0.5
            seen += batch_y.size(0)
            hits += (preds == batch_y).sum().item()

    accuracy = hits / seen
    print(f"Accuracy on test set: {accuracy:.4f}")


# Demo: synthetic data generation and end-to-end usage
def main():
    """Build random binary-classification data, train an FM model, and evaluate it."""
    batch_size = 32  # mini-batch size
    dim = 10  # number of features
    k = 5  # number of latent factors

    # Synthetic Gaussian features with random {0, 1} targets, reshaped to
    # (N, 1) floats as required by BCELoss against sigmoid outputs.
    x_train = torch.randn(1000, dim)
    y_train = torch.randint(0, 2, (1000,)).unsqueeze(1).float()
    x_test = torch.randn(200, dim)
    y_test = torch.randint(0, 2, (200,)).unsqueeze(1).float()

    train_loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(x_train, y_train),
        batch_size=batch_size,
        shuffle=True,
    )
    test_loader = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(x_test, y_test),
        batch_size=batch_size,
        shuffle=False,
    )

    model = FM(dim, k)

    # Plain SGD with BCE loss over the sigmoid outputs.
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    criterion = nn.BCELoss()
    epochs = 100

    train(model, train_loader, optimizer, criterion, epochs)
    test(model, test_loader)


# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()

你可能感兴趣的:(机器学习的感悟,深度学习,python,机器学习)