Spark MLlib ALS Algorithm

This experiment uses the ten-million-row ratings data from the MovieLens dataset for training and prediction. For each user, the 10 movies with the highest predicted ratings are written to HBase. The code is as follows:
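Note (an assumption, not stated in the original): the parser below splits each line on commas and uses only the first three fields, so the input file is expected to be a headerless CSV of the form userId,movieId,rating (extra columns such as a timestamp are ignored), for example:

1,122,5.0
1,185,5.0
2,231,3.0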

#!/usr/bin/python
#encoding=utf-8

from pyspark import SparkContext
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
import happybase

# Connect to the HBase Thrift server and create the output table
# with a single 'recommend' column family
# (create_table will raise an error if the table already exists)
connection = happybase.Connection('172.17.93.230', autoconnect=False)
connection.open()
connection.create_table('als_moive', {'recommend': dict()})
recommend = connection.table('als_moive')
sc = SparkContext(appName='als')

# Load and parse the data
data = sc.textFile("file:///root/recommend/test.csv")
ratings = data.map(lambda l: l.split(',')).map(lambda l: Rating(int(l[0]), int(l[1]), float(l[2])))
# Build the recommendation model using Alternating Least Squares
rank = 10           # number of latent factors
numIterations = 10  # number of ALS iterations
lambda_ = 0.02      # regularization parameter
blocks = 100        # number of blocks used to parallelize the computation
model = ALS.train(ratings, rank, numIterations, lambda_, blocks)
#model = ALS.trainImplicit(ratings, rank, numIterations,lambda_, alpha=0.1)
# Evaluate the model on training data
testdata = ratings.map(lambda p: (p[0], p[1]))
predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))
ratesAndPreds = ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
#print ratesAndPreds
MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()
print("Mean Squared Error = " + str(MSE))
# Compute the top-10 recommendations for every user and collect them to the driver
user_predict = model.recommendProductsForUsers(10).collect()

# Write each user's recommendations to HBase as a comma-separated
# list of 'product:rating' pairs, keyed by user id
for x in user_predict:
    key = str(x[0])
    tmp = x[1]
    items = []
    for item in tmp:
        item_value = str(item.product) + ':' + str(item.rating)
        items.append(item_value)
    value = ','.join(items)
    recommend.put(key, {'recommend:item_value': value})

# Save and load model
model.save(sc, "file:///root/recommend/myCollaborativeFilter")
sameModel = MatrixFactorizationModel.load(sc, "file:///root/recommend/myCollaborativeFilter")
sc.stop()
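
To verify what was written, the stored recommendations can be read back from HBase with happybase. The snippet below is a minimal sketch under the same assumptions as the script above (Thrift server at 172.17.93.230, table als_moive, column recommend:item_value); note that under Python 3 happybase returns row keys and cell values as bytes, hence the decode calls.

#!/usr/bin/python
#encoding=utf-8

import happybase

connection = happybase.Connection('172.17.93.230')
table = connection.table('als_moive')

# Scan a few rows; each value is a comma-separated list of 'product:rating' pairs
for row_key, data in table.scan(limit=5):
    value = data[b'recommend:item_value'].decode('utf-8')
    print(row_key.decode('utf-8'), value)

connection.close()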
