Main program code:
package com.atguigu.recommender
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.{MongoClient, MongoClientURI}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
/**
 *
 * Product dataset
 * 3982                              product ID
 * Fuhlen 富勒 M8眩光舞者时尚节能      product name
 * 1057,439,736                      category IDs (not needed)
 * B009EJN4T2                        Amazon ID (not needed)
 * https://images-cn-4.ssl-image     product image URL
 * 外设产品|鼠标|电脑/办公             product categories
 * 富勒|鼠标|电子产品|好用|外观漂亮     product UGC tags
 */
case class Product( productId: Int, name: String, imageUrl: String, categories: String, tags: String )
/**
 * Rating dataset
 * 4867          user ID
 * 457976        product ID
 * 5.0           rating
 * 1395676800    timestamp
 */
case class Rating( userId: Int, productId: Int, score: Double, timestamp: Int )
/**
 * MongoDB connection configuration
 * @param uri MongoDB connection URI
 * @param db  database to operate on
 */
case class MongoConfig( uri: String, db: String )
object DataLoader {
// paths of the raw data files
val PRODUCT_DATA_PATH = "D:\\Projects\\BigData\\ECommerceRecommendSystem\\recommender\\DataLoader\\src\\main\\resources\\products.csv"
val RATING_DATA_PATH = "D:\\Projects\\BigData\\ECommerceRecommendSystem\\recommender\\DataLoader\\src\\main\\resources\\ratings.csv"
// collection names stored in MongoDB
val MONGODB_PRODUCT_COLLECTION = "Product"
val MONGODB_RATING_COLLECTION = "Rating"
def main(args: Array[String]): Unit = {
val config = Map(
"spark.cores" -> "local[*]",
"mongo.uri" -> "mongodb://localhost:27017/recommender",
"mongo.db" -> "recommender"
)
// create a spark config
val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("DataLoader")
// create a spark session
val spark = SparkSession.builder().config(sparkConf).getOrCreate()
import spark.implicits._
// load the data
val productRDD = spark.sparkContext.textFile(PRODUCT_DATA_PATH)
val productDF = productRDD.map( item => {
// product fields are separated by ^, split them
val attr = item.split("\\^")
// convert to a Product object
Product( attr(0).toInt, attr(1).trim, attr(4).trim, attr(5).trim, attr(6).trim )
} ).toDF()
val ratingRDD = spark.sparkContext.textFile(RATING_DATA_PATH)
val ratingDF = ratingRDD.map( item => {
val attr = item.split(",")
Rating( attr(0).toInt, attr(1).toInt, attr(2).toDouble, attr(3).toInt )
} ).toDF()
implicit val mongoConfig = MongoConfig( config("mongo.uri"), config("mongo.db") )
storeDataInMongoDB( productDF, ratingDF )
spark.stop()
}
/**
 * Write the data into MongoDB
 */
def storeDataInMongoDB( productDF: DataFrame, ratingDF: DataFrame )(implicit mongoConfig: MongoConfig): Unit ={
// create a mongodb client connection
val mongoClient = MongoClient( MongoClientURI(mongoConfig.uri) )
// handles to the mongodb collections we operate on, e.g. db.Product
val productCollection = mongoClient( mongoConfig.db )( MONGODB_PRODUCT_COLLECTION )
val ratingCollection = mongoClient( mongoConfig.db )( MONGODB_RATING_COLLECTION )
// drop the collections if they already exist
productCollection.dropCollection()
ratingCollection.dropCollection()
// write the current data into the corresponding collections
productDF.write
.option("uri", mongoConfig.uri)
.option("collection", MONGODB_PRODUCT_COLLECTION)
.mode("overwrite")
.format("com.mongodb.spark.sql")
.save()
ratingDF.write
.option("uri", mongoConfig.uri)
.option("collection", MONGODB_RATING_COLLECTION)
.mode("overwrite")
.format("com.mongodb.spark.sql")
.save()
// create indexes on the collections
productCollection.createIndex( MongoDBObject( "productId" -> 1 ) )
ratingCollection.createIndex( MongoDBObject( "productId" -> 1 ) )
ratingCollection.createIndex( MongoDBObject( "userId" -> 1 ) )
mongoClient.close()
}
}
Dependencies declared in the statistics module's pom.xml:
<dependencies>
<!-- Spark dependencies -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
</dependency>
<!-- Scala -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
</dependency>
<!-- MongoDB drivers -->
<!-- for connecting to MongoDB directly from code -->
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>casbah-core_2.11</artifactId>
<version>${casbah.version}</version>
</dependency>
<!-- Spark-MongoDB connector -->
<dependency>
<groupId>org.mongodb.spark</groupId>
<artifactId>mongo-spark-connector_2.11</artifactId>
<version>${mongodb-spark.version}</version>
</dependency>
</dependencies>
Put a log4j.properties file under the resources folder, then create the Scala singleton object com.atguigu.statistics.StatisticsRecommender under src/main/scala.
As before, we first define the case classes, then in main() define the configuration, create the SparkSession and load the data, and finally stop Spark.
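A minimal log4j.properties might look like the following (console output only; adjust the pattern and level as needed):
log4j.rootLogger=info, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %5p --- [%t] %c : %m%n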
Main body code (src/main/scala/com.atguigu.statistics/StatisticsRecommender.scala):
package com.atguigu.statistics
import java.text.SimpleDateFormat
import java.util.Date
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
case class Rating( userId: Int, productId: Int, score: Double, timestamp: Int )
case class MongoConfig( uri: String, db: String )
object StatisticsRecommender {
// collection names stored in MongoDB
val MONGODB_RATING_COLLECTION = "Rating"
val RATE_MORE_PRODUCTS = "RateMoreProducts"
val RATE_MORE_RECENTLY_PRODUCTS = "RateMoreRecentlyProducts"
val AVERAGE_PRODUCTS = "AverageProducts"
def main(args: Array[String]): Unit = {
val config = Map(
"spark.cores" -> "local[1]",
"mongo.uri" -> "mongodb://localhost:27017/recommender",
"mongo.db" -> "recommender"
)
// create a spark config
val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("StatisticsRecommender")
// create a spark session
val spark = SparkSession.builder().config(sparkConf).getOrCreate()
import spark.implicits._
implicit val mongoConfig = MongoConfig( config("mongo.uri"), config("mongo.db") )
// load the data
val ratingDF = spark.read
.option("uri", mongoConfig.uri)
.option("collection", MONGODB_RATING_COLLECTION)
.format("com.mongodb.spark.sql")
.load()
.as[Rating]
.toDF()
// create a temporary view called ratings
ratingDF.createOrReplaceTempView("ratings")
// TODO: 【 compute the different statistics with Spark SQL 】
// todo: (1) Historically popular products: count ratings per product -> productId, count
val rateMoreProductsDF = spark.sql("select productId, count(productId) as count from ratings group by productId order by count desc")
storeDFInMongoDB( rateMoreProductsDF, RATE_MORE_PRODUCTS )
// todo: (2) Recently popular products: convert the timestamp to yyyyMM and count ratings per month -> productId, count, yearmonth
// create a date formatter
val simpleDateFormat = new SimpleDateFormat("yyyyMM")
// register a UDF that converts the timestamp (in seconds) into a yyyyMM integer; multiply by 1000L because java.util.Date expects milliseconds
spark.udf.register("changeDate", (x: Int)=>simpleDateFormat.format(new Date(x * 1000L)).toInt)
// reshape the raw ratings into productId, score, yearmonth
val ratingOfYearMonthDF = spark.sql("select productId, score, changeDate(timestamp) as yearmonth from ratings")
ratingOfYearMonthDF.createOrReplaceTempView("ratingOfMonth")
val rateMoreRecentlyProductsDF = spark.sql("select productId, count(productId) as count, yearmonth from ratingOfMonth group by yearmonth, productId order by yearmonth desc, count desc")
// save the df to mongodb
storeDFInMongoDB( rateMoreRecentlyProductsDF, RATE_MORE_RECENTLY_PRODUCTS )
// todo: (3) Top-quality products: average rating per product -> productId, avg
val averageProductsDF = spark.sql("select productId, avg(score) as avg from ratings group by productId order by avg desc")
storeDFInMongoDB( averageProductsDF, AVERAGE_PRODUCTS )
spark.stop()
}
// TODO: 【 save a DataFrame to MongoDB 】
def storeDFInMongoDB(df: DataFrame, collection_name: String)(implicit mongoConfig: MongoConfig): Unit ={
df.write
.option("uri", mongoConfig.uri)
.option("collection", collection_name)
.mode("overwrite")
.format("com.mongodb.spark.sql")
.save()
}
}
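Dependencies for the offline recommendation module (pom.xml):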
<dependencies>
<dependency>
<groupId>org.scalanlp</groupId>
<artifactId>jblas</artifactId>
<version>${jblas.version}</version>
</dependency>
<!-- Spark dependencies -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-mllib_2.11</artifactId>
</dependency>
<!-- Scala -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
</dependency>
<!-- MongoDB drivers -->
<!-- for connecting to MongoDB directly from code -->
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>casbah-core_2.11</artifactId>
<version>${casbah.version}</version>
</dependency>
<!-- Spark-MongoDB connector -->
<dependency>
<groupId>org.mongodb.spark</groupId>
<artifactId>mongo-spark-connector_2.11</artifactId>
<version>${mongodb-spark.version}</version>
</dependency>
</dependencies>
The core code is as follows (src/main/scala/com.atguigu.offline/OfflineRecommender.scala):
case class ProductRating(userId: Int, productId: Int, score: Double, timestamp: Int)
case class MongoConfig(uri:String, db:String)
// standard recommendation object: productId, score
case class Recommendation(productId: Int, score:Double)
// a user's recommendation list
case class UserRecs(userId: Int, recs: Seq[Recommendation])
// product similarity (product recommendations)
case class ProductRecs(productId: Int, recs: Seq[Recommendation])
object OfflineRecommender {
// define constants
val MONGODB_RATING_COLLECTION = "Rating"
// names of the recommendation collections
val USER_RECS = "UserRecs"
val PRODUCT_RECS = "ProductRecs"
val USER_MAX_RECOMMENDATION = 20
def main(args: Array[String]): Unit = {
// define the configuration
val config = Map(
"spark.cores" -> "local[*]",
"mongo.uri" -> "mongodb://localhost:27017/recommender",
"mongo.db" -> "recommender"
)
// create a spark session
val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("OfflineRecommender")
val spark = SparkSession.builder().config(sparkConf).getOrCreate()
implicit val mongoConfig = MongoConfig(config("mongo.uri"),config("mongo.db"))
import spark.implicits._
// read the rating (business) data from MongoDB
val ratingRDD = spark
.read
.option("uri",mongoConfig.uri)
.option("collection",MONGODB_RATING_COLLECTION)
.format("com.mongodb.spark.sql")
.load()
.as[ProductRating]
.rdd
.map(rating=> (rating.userId, rating.productId, rating.score)).cache()
// the set of distinct users, RDD[Int]
val userRDD = ratingRDD.map(_._1).distinct()
val productRDD = ratingRDD.map(_._2).distinct()
// build the training dataset
val trainData = ratingRDD.map(x => Rating(x._1,x._2,x._3))
// rank is the number of latent factors, iterations the number of iterations, lambda the ALS regularization parameter
val (rank,iterations,lambda) = (50, 5, 0.01)
// train the latent-factor model with ALS
val model = ALS.train(trainData,rank,iterations,lambda)
// compute the user recommendation matrix
val userProducts = userRDD.cartesian(productRDD)
// the model is trained; feeding in (user, product) pairs yields the predicted ratings RDD[Rating] (userId, productId, rating)
val preRatings = model.predict(userProducts)
val userRecs = preRatings
.filter(_.rating > 0)
.map(rating => (rating.user,(rating.product, rating.rating)))
.groupByKey()
.map{
case (userId,recs) => UserRecs(userId, recs.toList.sortWith(_._2 > _._2).take(USER_MAX_RECOMMENDATION).map(x => Recommendation(x._1,x._2)))
}.toDF()
userRecs.write
.option("uri",mongoConfig.uri)
.option("collection",USER_RECS)
.mode("overwrite")
.format("com.mongodb.spark.sql")
.save()
// TODO: compute the product similarity matrix
// stop spark
spark.stop()
}
}
The product similarity matrix is computed from the ALS result; it is used to look up products similar to the current one and serves the real-time recommendation service.
The offline ALS algorithm ends up producing a feature matrix for users and one for products: a user feature matrix U (m x k), in which each user is described by k latent features, and a product feature matrix V (n x k), in which each product is likewise described by k latent features.
Each row of V (n x k) is a k-dimensional vector. We do not know what each individual dimension means, but the vector as a whole mathematically characterizes the corresponding product.
So every product is represented by its row of V (n x k), and the similarity between any two products p and q in the dataset can be computed as the cosine of their feature vectors, sim(p, q) = Vp · Vq / (|Vp| * |Vq|). Product-to-product similarity is essentially constant over a period of time, so it can be precomputed offline. The resulting data is saved to the ProductRecs collection in MongoDB.
// compute the product similarity matrix
// get the product feature matrix, with format RDD[(scala.Int, scala.Array[scala.Double])]
val productFeatures = model.productFeatures.map{case (productId,features) =>
(productId, new DoubleMatrix(features))
}
// take the cartesian product of the features with themselves, then filter and combine
val productRecs = productFeatures.cartesian(productFeatures)
.filter{case (a,b) => a._1 != b._1}
.map{case (a,b) =>
val simScore = this.consinSim(a._2,b._2) // cosine similarity
(a._1,(b._1,simScore))
}.filter(_._2._2 > 0.6)
.groupByKey()
.map{case (productId,items) =>
ProductRecs(productId,items.toList.map(x => Recommendation(x._1,x._2)))
}.toDF()
productRecs
.write
.option("uri", mongoConfig.uri)
.option("collection",PRODUCT_RECS)
.mode("overwrite")
.format("com.mongodb.spark.sql")
.save()
// cosine similarity between two products
def consinSim(product1: DoubleMatrix, product2:DoubleMatrix) : Double ={
product1.dot(product2) / ( product1.norm2() * product2.norm2() )
}
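As a quick sanity check of consinSim (a sketch with made-up vectors, not part of the project code): the cosine of a vector with itself is 1.0, and orthogonal vectors give 0.0.
import org.jblas.DoubleMatrix
val v1 = new DoubleMatrix(Array(1.0, 2.0, 3.0))
val v2 = new DoubleMatrix(Array(3.0, 0.0, -1.0))   // dot product with v1 is 0
consinSim(v1, v1)   // 1.0
consinSim(v1, v2)   // 0.0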
Core code: create a new singleton object ALSTrainer under scala/com.atguigu.offline/:
package com.atguigu.offline
import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import scala.math.sqrt
object ALSTrainer {
def main(args: Array[String]): Unit = {
val config = Map(
"spark.cores" -> "local[*]",
"mongo.uri" -> "mongodb://localhost:27017/recommender",
"mongo.db" -> "recommender"
)
// create a SparkConf
val sparkConf = new SparkConf().setAppName("ALSTrainer").setMaster(config("spark.cores"))
// create a SparkSession
val spark = SparkSession.builder().config(sparkConf).getOrCreate()
val mongoConfig = MongoConfig(config("mongo.uri"),config("mongo.db"))
import spark.implicits._
// load the rating data
val ratingRDD = spark
.read
.option("uri",mongoConfig.uri)
.option("collection",OfflineRecommender.MONGODB_RATING_COLLECTION)
.format("com.mongodb.spark.sql")
.load()
.as[ProductRating]
.rdd
.map(rating => Rating(rating.userId,rating.productId,rating.score)).cache()
// randomly split the RDD into two, used as the training set and the test set
val splits = ratingRDD.randomSplit(Array(0.8, 0.2))
val trainingRDD = splits(0)
val testingRDD = splits(1)
// output the optimal parameters
adjustALSParams(trainingRDD, testingRDD)
// stop Spark
spark.close()
}
// output the final optimal parameters
def adjustALSParams(trainData:RDD[Rating], testData:RDD[Rating]): Unit ={
// fix the number of iterations at 5 and search over a few candidate values of rank and lambda
val result = for(rank <- Array(100,200,250); lambda <- Array(1, 0.1, 0.01, 0.001))
yield {
val model = ALS.train(trainData,rank,5,lambda)
val rmse = getRMSE(model, testData)
(rank,lambda,rmse)
}
// sort by rmse and print the best (rank, lambda, rmse)
println(result.sortBy(_._3).head)
}
def getRMSE(model:MatrixFactorizationModel, data:RDD[Rating]):Double={
val userProducts = data.map(item => (item.user,item.product))
val predictRating = model.predict(userProducts)
val real = data.map(item => ((item.user,item.product),item.rating))
val predict = predictRating.map(item => ((item.user,item.product),item.rating))
// compute the RMSE
sqrt(
real.join(predict).map{case ((userId,productId),(real,pre))=>
// difference between the true rating and the prediction
val err = real - pre
err * err
}.mean()
)
}
}
Main code:
package com.atguigu.offline
import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.sql.SparkSession
import org.jblas.DoubleMatrix
case class ProductRating( userId: Int, productId: Int, score: Double, timestamp: Int )
case class MongoConfig( uri: String, db: String )
// standard recommendation object
case class Recommendation( productId: Int, score: Double )
// a user's recommendation list
case class UserRecs( userId: Int, recs: Seq[Recommendation] )
// a product's similarity list
case class ProductRecs( productId: Int, recs: Seq[Recommendation] )
object OfflineRecommender {
// collection names stored in MongoDB
val MONGODB_RATING_COLLECTION = "Rating"
val USER_RECS = "UserRecs"
val PRODUCT_RECS = "ProductRecs"
val USER_MAX_RECOMMENDATION = 20
def main(args: Array[String]): Unit = {
val config = Map(
"spark.cores" -> "local[*]",
"mongo.uri" -> "mongodb://localhost:27017/recommender",
"mongo.db" -> "recommender"
)
// create a spark config
val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("OfflineRecommender")
// create a spark session
val spark = SparkSession.builder().config(sparkConf).getOrCreate()
import spark.implicits._
implicit val mongoConfig = MongoConfig( config("mongo.uri"), config("mongo.db") )
// load the data
val ratingRDD = spark.read
.option("uri", mongoConfig.uri)
.option("collection", MONGODB_RATING_COLLECTION)
.format("com.mongodb.spark.sql")
.load()
.as[ProductRating]
.rdd
.map(
rating => (rating.userId, rating.productId, rating.score)
).cache()
// extract the distinct users and products
val userRDD = ratingRDD.map(_._1).distinct()
val productRDD = ratingRDD.map(_._2).distinct()
// core computation
// 1. train the latent-factor model
val trainData = ratingRDD.map(x=>Rating(x._1,x._2,x._3))
// model training parameters: rank is the number of latent features, iterations the iteration count, lambda the regularization coefficient
val ( rank, iterations, lambda ) = ( 5, 10, 0.01 )
val model = ALS.train( trainData, rank, iterations, lambda )
// 2. get the predicted rating matrix and derive each user's recommendation list
// take the cartesian product of userRDD and productRDD to get the empty (user, product) rating matrix userProducts
val userProducts = userRDD.cartesian(productRDD)
val preRating = model.predict(userProducts)
// extract each user's recommendation list from the predicted ratings
val userRecs = preRating.filter(_.rating>0)
.map(
rating => ( rating.user, ( rating.product, rating.rating ) )
)
.groupByKey()
.map{
case (userId, recs) =>
UserRecs( userId, recs.toList.sortWith(_._2>_._2).take(USER_MAX_RECOMMENDATION).map(x=>Recommendation(x._1,x._2)) )
}
.toDF()
userRecs.write
.option("uri", mongoConfig.uri)
.option("collection", USER_RECS)
.mode("overwrite")
.format("com.mongodb.spark.sql")
.save()
// 3. use the product feature vectors to compute each product's similarity list
val productFeatures = model.productFeatures.map{
case (productId, features) => ( productId, new DoubleMatrix(features) )
}
// pair up products two by two and compute cosine similarity
val productRecs = productFeatures.cartesian(productFeatures)
.filter{
case (a, b) => a._1 != b._1
}
// compute the cosine similarity
.map{
case (a, b) =>
val simScore = consinSim( a._2, b._2 )
( a._1, ( b._1, simScore ) )
}
.filter(_._2._2 > 0.4)
.groupByKey()
.map{
case (productId, recs) =>
ProductRecs( productId, recs.toList.sortWith(_._2>_._2).map(x=>Recommendation(x._1,x._2)) )
}
.toDF()
productRecs.write
.option("uri", mongoConfig.uri)
.option("collection", PRODUCT_RECS)
.mode("overwrite")
.format("com.mongodb.spark.sql")
.save()
spark.stop()
}
def consinSim(product1: DoubleMatrix, product2: DoubleMatrix): Double ={
product1.dot(product2)/ ( product1.norm2() * product2.norm2() )
}
}
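Dependencies for the real-time recommendation module (pom.xml):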
<dependencies>
<!-- Spark dependencies -->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.11</artifactId>
</dependency>
<!-- Scala -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
</dependency>
<!-- MongoDB drivers -->
<!-- for connecting to MongoDB directly from code -->
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>casbah-core_2.11</artifactId>
<version>${casbah.version}</version>
</dependency>
<!-- Spark-MongoDB connector -->
<dependency>
<groupId>org.mongodb.spark</groupId>
<artifactId>mongo-spark-connector_2.11</artifactId>
<version>${mongodb-spark.version}</version>
</dependency>
<!-- redis -->
<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>2.9.0</version>
</dependency>
<!-- kafka -->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.10.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
<version>${spark.version}</version>
</dependency>
</dependencies>
Core code (src/main/scala/com.atguigu.streaming/StreamingRecommender.scala):
// connection helper object
object ConnHelper extends Serializable{
lazy val jedis = new Jedis("localhost")
lazy val mongoClient = MongoClient(MongoClientURI("mongodb://localhost:27017/recommender"))
}
case class MongConfig(uri:String,db:String)
// standard recommendation
case class Recommendation(productId:Int, score:Double)
// a user's recommendations
case class UserRecs(userId:Int, recs:Seq[Recommendation])
// a product's similarity list
case class ProductRecs(productId:Int, recs:Seq[Recommendation])
object StreamingRecommender {
val MAX_USER_RATINGS_NUM = 20
val MAX_SIM_PRODUCTS_NUM = 20
val MONGODB_STREAM_RECS_COLLECTION = "StreamRecs"
val MONGODB_RATING_COLLECTION = "Rating"
val MONGODB_PRODUCT_RECS_COLLECTION = "ProductRecs"
// entry point (implemented in full below)
def main(args: Array[String]): Unit = {
}
}
def main(args: Array[String]): Unit = {
val config = Map(
"spark.cores" -> "local[*]",
"mongo.uri" -> "mongodb://localhost:27017/recommender",
"mongo.db" -> "recommender",
"kafka.topic" -> "recommender"
)
// create a SparkConf
val sparkConf = new SparkConf().setAppName("StreamingRecommender").setMaster(config("spark.cores"))
val spark = SparkSession.builder().config(sparkConf).getOrCreate()
val sc = spark.sparkContext
val ssc = new StreamingContext(sc,Seconds(2))
implicit val mongConfig = MongConfig(config("mongo.uri"),config("mongo.db"))
import spark.implicits._
// broadcast the product similarity matrix
// convert it into Map[Int, Map[Int,Double]]
val simProductsMatrix = spark
.read
.option("uri",config("mongo.uri"))
.option("collection",MONGODB_PRODUCT_RECS_COLLECTION)
.format("com.mongodb.spark.sql")
.load()
.as[ProductRecs]
.rdd
.map{recs =>
(recs.productId,recs.recs.map(x=> (x.productId,x.score)).toMap)
}.collectAsMap()
val simProductsMatrixBroadCast = sc.broadcast(simProductsMatrix)
// create the connection to Kafka
val kafkaPara = Map(
"bootstrap.servers" -> "localhost:9092",
"key.deserializer" -> classOf[StringDeserializer],
"value.deserializer" -> classOf[StringDeserializer],
"group.id" -> "recommender",
"auto.offset.reset" -> "latest"
)
val kafkaStream = KafkaUtils.createDirectStream[String,String](ssc,LocationStrategies.PreferConsistent,ConsumerStrategies.Subscribe[String,String](Array(config("kafka.topic")),kafkaPara))
// messages have the form USERID|PRODUCTID|SCORE|TIMESTAMP
// produce the rating stream
val ratingStream = kafkaStream.map{case msg=>
var attr = msg.value().split("\\|")
(attr(0).toInt,attr(1).toInt,attr(2).toDouble,attr(3).toInt)
}
// core real-time recommendation algorithm
ratingStream.foreachRDD{rdd =>
rdd.map{case (userId,productId,score,timestamp) =>
println(">>>>>>>>>>>>>>>>")
// get the user's most recent M product ratings
val userRecentlyRatings = getUserRecentlyRating(MAX_USER_RATINGS_NUM,userId,ConnHelper.jedis)
// get the K products most similar to product P
val simProducts = getTopSimProducts(MAX_SIM_PRODUCTS_NUM,productId,userId,simProductsMatrixBroadCast.value)
// compute the recommendation priority of each candidate product
val streamRecs = computeProductScores(simProductsMatrixBroadCast.value,userRecentlyRatings,simProducts)
// save the result to MongoDB
saveRecsToMongoDB(userId,streamRecs)
}.count()
}
// start the Streaming application
ssc.start()
ssc.awaitTermination()
}
import scala.collection.JavaConversions._
/**
 * Get the user's most recent M product ratings
 * @param num    number of ratings to fetch
 * @param userId whose ratings
 * @return
 */
def getUserRecentlyRating(num:Int, userId:Int,jedis:Jedis): Array[(Int,Double)] ={
// take num ratings from the user's rating queue in redis
jedis.lrange("userId:"+userId.toString, 0, num).map{item =>
val attr = item.split("\\:")
(attr(0).trim.toInt, attr(1).trim.toDouble)
}.toArray
}
In the offline algorithm the product similarity matrix has already been computed, so the K products most similar to a given productId are easy to obtain: the ProductRecs data is read from MongoDB, and the top-K most similar products are taken from the sub-map that the broadcast similarity hash holds for productId. The output is an Array[Int] of the products most similar to productId; it is used as the candidate set and named candidateProducts.
/**
 * Get the K products most similar to the current product
 * @param num         number of similar products
 * @param productId   current product ID
 * @param userId      current rating user
 * @param simProducts value of the broadcast product similarity matrix
 * @param mongConfig  MongoDB configuration
 * @return
 */
def getTopSimProducts(num:Int, productId:Int, userId:Int, simProducts:scala.collection.Map[Int,scala.collection.immutable.Map[Int,Double]])(implicit mongConfig: MongConfig): Array[Int] ={
// get all products similar to the current product from the broadcast similarity matrix
val allSimProducts = simProducts.get(productId).get.toArray
// get the products the user has already rated
val ratingExist = ConnHelper.mongoClient(mongConfig.db)(MONGODB_RATING_COLLECTION).find(MongoDBObject("userId" -> userId)).toArray.map{item =>
item.get("productId").toString.toInt
}
// filter out the products already rated, sort by similarity and output
allSimProducts.filter(x => !ratingExist.contains(x._1)).sortWith(_._2 > _._2).take(num).map(x => x._1)
}
/**
 * Compute the recommendation score of each candidate product
 * @param simProducts         product similarity matrix
 * @param userRecentlyRatings the user's most recent K ratings
 * @param topSimProducts      the K products most similar to the current product
 * @return
 */
def computeProductScores(simProducts: scala.collection.Map[Int,scala.collection.immutable.Map[Int,Double]], userRecentlyRatings: Array[(Int,Double)], topSimProducts: Array[Int]): Array[(Int,Double)] ={
// holds the weighted score of each candidate product against each recently rated product
val score = scala.collection.mutable.ArrayBuffer[(Int,Double)]()
// counts the "boost" factor per product
val increMap = scala.collection.mutable.HashMap[Int,Int]()
// counts the "penalty" factor per product
val decreMap = scala.collection.mutable.HashMap[Int,Int]()
for (topSimProduct <- topSimProducts; userRecentlyRating <- userRecentlyRatings){
val simScore = getProductsSimScore(simProducts,userRecentlyRating._1,topSimProduct)
if(simScore > 0.6){
score += ((topSimProduct, simScore * userRecentlyRating._2 ))
if(userRecentlyRating._2 > 3){
increMap(topSimProduct) = increMap.getOrDefault(topSimProduct,0) + 1
}else{
decreMap(topSimProduct) = decreMap.getOrDefault(topSimProduct,0) + 1
}
}
}
score.groupBy(_._1).map{case (productId,sims) =>
(productId,sims.map(_._2).sum / sims.length + log(increMap.getOrDefault(productId, 1)) - log(decreMap.getOrDefault(productId, 1)))
}.toArray.sortWith(_._2>_._2)
}
/**
 * Get the similarity between two products
 * @param simProducts       product similarity matrix
 * @param userRatingProduct a product the user has rated
 * @param topSimProduct     candidate product
 * @return
 */
def getProductsSimScore(
simProducts:scala.collection.Map[Int,scala.collection.immutable.Map[Int,Double]], userRatingProduct:Int, topSimProduct:Int): Double ={
simProducts.get(topSimProduct) match {
case Some(sim) => sim.get(userRatingProduct) match {
case Some(score) => score
case None => 0.0
}
case None => 0.0
}
}
// log base 10; note log(1) = 0, so the default count of 1 contributes nothing
def log(m:Int):Double ={
math.log(m) / math.log(10)
}
/**
 * Save the result to MongoDB, e.g. userId -> 1, recs -> 22:4.5|45:3.8
 * @param streamRecs the streaming recommendation result
 * @param mongConfig MongoDB configuration
 */
def saveRecsToMongoDB(userId:Int,streamRecs:Array[(Int,Double)])(implicit mongConfig: MongConfig): Unit ={
// handle to the StreamRecs collection
val streaRecsCollection = ConnHelper.mongoClient(mongConfig.db)(MONGODB_STREAM_RECS_COLLECTION)
streaRecsCollection.findAndRemove(MongoDBObject("userId" -> userId))
streaRecsCollection.insert(MongoDBObject("userId" -> userId, "recs" ->
streamRecs.map( x => MongoDBObject("productId"->x._1,"score"->x._2)) ))
}
Main code:
package com.atguigu.online
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.{MongoClient, MongoClientURI}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis
// a connection helper object holding the connections to redis and mongodb
object ConnHelper extends Serializable{
// lazy vals, initialized only when first used
lazy val jedis = new Jedis("localhost")
lazy val mongoClient = MongoClient(MongoClientURI("mongodb://localhost:27017/recommender"))
}
case class MongoConfig( uri: String, db: String )
// standard recommendation object
case class Recommendation( productId: Int, score: Double )
// a user's recommendation list
case class UserRecs( userId: Int, recs: Seq[Recommendation] )
// a product's similarity list
case class ProductRecs( productId: Int, recs: Seq[Recommendation] )
object OnlineRecommender {
// define constants and collection names
val MONGODB_RATING_COLLECTION = "Rating"
val STREAM_RECS = "StreamRecs"
val PRODUCT_RECS = "ProductRecs"
val MAX_USER_RATING_NUM = 20
val MAX_SIM_PRODUCTS_NUM = 20
def main(args: Array[String]): Unit = {
val config = Map(
"spark.cores" -> "local[*]",
"mongo.uri" -> "mongodb://localhost:27017/recommender",
"mongo.db" -> "recommender",
"kafka.topic" -> "recommender"
)
// create a spark conf
val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("OnlineRecommender")
val spark = SparkSession.builder().config(sparkConf).getOrCreate()
val sc = spark.sparkContext
val ssc = new StreamingContext(sc, Seconds(2))
import spark.implicits._
implicit val mongoConfig = MongoConfig( config("mongo.uri"), config("mongo.db") )
// load the similarity matrix and broadcast it
val simProductsMatrix = spark.read
.option("uri", mongoConfig.uri)
.option("collection", PRODUCT_RECS)
.format("com.mongodb.spark.sql")
.load()
.as[ProductRecs]
.rdd
// convert the data into a map so similarity lookups are convenient later
.map{item =>
( item.productId, item.recs.map( x=>(x.productId, x.score) ).toMap )
}
.collectAsMap()
// define the broadcast variable
val simProcutsMatrixBC = sc.broadcast(simProductsMatrix)
// kafka configuration parameters
val kafkaParam = Map(
"bootstrap.servers" -> "localhost:9092",
"key.deserializer" -> classOf[StringDeserializer],
"value.deserializer" -> classOf[StringDeserializer],
"group.id" -> "recommender",
"auto.offset.reset" -> "latest"
)
// create a DStream
val kafkaStream = KafkaUtils.createDirectStream[String, String]( ssc,
LocationStrategies.PreferConsistent,
ConsumerStrategies.Subscribe[String, String]( Array(config("kafka.topic")), kafkaParam )
)
// process kafkaStream to produce the rating stream, userId|productId|score|timestamp
val ratingStream = kafkaStream.map{msg=>
var attr = msg.value().split("\\|")
( attr(0).toInt, attr(1).toInt, attr(2).toDouble, attr(3).toInt )
}
// core algorithm: define the processing applied to the rating stream
ratingStream.foreachRDD{
rdds => rdds.foreach{
case ( userId, productId, score, timestamp ) =>
println("rating data coming!>>>>>>>>>>>>>>>>>>")
// TODO: core algorithm flow
// 1. take the user's recent ratings from redis, as Array[(productId, score)]
val userRecentlyRatings = getUserRecentlyRatings( MAX_USER_RATING_NUM, userId, ConnHelper.jedis )
// 2. get the products most similar to the current one from the similarity matrix, as the candidate list Array[productId]
val candidateProducts = getTopSimProducts( MAX_SIM_PRODUCTS_NUM, productId, userId, simProcutsMatrixBC.value )
// 3. compute the recommendation priority of each candidate, giving the user's real-time recommendation list Array[(productId, score)]
val streamRecs = computeProductScore( candidateProducts, userRecentlyRatings, simProcutsMatrixBC.value )
// 4. save the recommendation list to mongodb
saveDataToMongoDB( userId, streamRecs )
}
}
// start streaming
ssc.start()
println("streaming started!")
ssc.awaitTermination()
}
/**
 * Fetch the user's most recent num ratings from redis
 */
import scala.collection.JavaConversions._
def getUserRecentlyRatings(num: Int, userId: Int, jedis: Jedis): Array[(Int, Double)] = {
// read the user's rating queue from redis; the list key is userId:USERID and each value has the form PRODUCTID:SCORE
jedis.lrange( "userId:" + userId.toString, 0, num )
.map{ item =>
val attr = item.split("\\:")
( attr(0).trim.toInt, attr(1).trim.toDouble )
}
.toArray
}
// get the current product's similarity list and filter out the products the user has already rated, producing the candidate list
def getTopSimProducts(num: Int,
productId: Int,
userId: Int,
simProducts: scala.collection.Map[Int, scala.collection.immutable.Map[Int, Double]])
(implicit mongoConfig: MongoConfig): Array[Int] ={
// get the current product's similarity list from the broadcast similarity matrix
val allSimProducts = simProducts(productId).toArray
// get the products the user has already rated, so they can be filtered out
val ratingCollection = ConnHelper.mongoClient( mongoConfig.db )( MONGODB_RATING_COLLECTION )
val ratingExist = ratingCollection.find( MongoDBObject("userId"->userId) )
.toArray
.map{item=> // only the productId is needed
item.get("productId").toString.toInt
}
// filter them out of the similar products
allSimProducts.filter( x => ! ratingExist.contains(x._1) )
.sortWith(_._2 > _._2)
.take(num)
.map(x=>x._1)
}
// compute the recommendation score of each candidate product
def computeProductScore(candidateProducts: Array[Int],
userRecentlyRatings: Array[(Int, Double)],
simProducts: scala.collection.Map[Int, scala.collection.immutable.Map[Int, Double]])
: Array[(Int, Double)] ={
// a variable-length ArrayBuffer holding each candidate product's base scores, (productId, score)
val scores = scala.collection.mutable.ArrayBuffer[(Int, Double)]()
// two maps counting high and low recent ratings per product, productId -> count
val increMap = scala.collection.mutable.HashMap[Int, Int]()
val decreMap = scala.collection.mutable.HashMap[Int, Int]()
// for every candidate product, compute its similarity to every recently rated product
for( candidateProduct <- candidateProducts; userRecentlyRating <- userRecentlyRatings ){
// similarity between the current candidate and the current recently rated product, from the similarity matrix
val simScore = getProductsSimScore( candidateProduct, userRecentlyRating._1, simProducts )
if( simScore > 0.4 ){
// weight by the recent rating to get the base score
scores += ( (candidateProduct, simScore * userRecentlyRating._2) )
if( userRecentlyRating._2 > 3 ){
increMap(candidateProduct) = increMap.getOrDefault(candidateProduct, 0) + 1
} else {
decreMap(candidateProduct) = decreMap.getOrDefault(candidateProduct, 0) + 1
}
}
}
// compute the final priorities: first group the base scores by productId
scores.groupBy(_._1).map{
case (productId, scoreList) =>
( productId, scoreList.map(_._2).sum/scoreList.length + log(increMap.getOrDefault(productId, 1)) - log(decreMap.getOrDefault(productId, 1)) )
}
// return the recommendation list sorted by score
.toArray
.sortWith(_._2>_._2)
}
def getProductsSimScore(product1: Int, product2: Int,
simProducts: scala.collection.Map[Int, scala.collection.immutable.Map[Int, Double]]): Double ={
simProducts.get(product1) match {
case Some(sims) => sims.get(product2) match {
case Some(score) => score
case None => 0.0
}
case None => 0.0
}
}
// custom log function with base N
def log(m: Int): Double = {
val N = 10
math.log(m)/math.log(N)
}
// write to mongodb
def saveDataToMongoDB(userId: Int, streamRecs: Array[(Int, Double)])(implicit mongoConfig: MongoConfig): Unit ={
val streamRecsCollection = ConnHelper.mongoClient(mongoConfig.db)(STREAM_RECS)
// look up by userId: remove any existing record, then insert the new one
streamRecsCollection.findAndRemove( MongoDBObject( "userId" -> userId ) )
streamRecsCollection.insert( MongoDBObject( "userId" -> userId,
"recs" -> streamRecs.map(x=>MongoDBObject("productId"->x._1, "score"->x._2)) ) )
}
}
bin/zkServer.sh start
bin/kafka-server-start.sh -daemon ./config/server.properties
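If the two topics used below have not been created yet, they can be created first (assuming a local single-broker setup with ZooKeeper on localhost:2181):
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic recommender
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic log
Dependencies for the kafkastream log-processing module (pom.xml):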
<dependencies>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-streams</artifactId>
<version>0.10.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>0.10.2.1</version>
</dependency>
</dependencies>
<build>
<finalName>kafkastream</finalName>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<archive>
<manifest>
<mainClass>com.atguigu.kafkastream.Application</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
package com.atguigu.kafkastream;
import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.TopologyBuilder;
public class Application {
public static void main(String[] args){
String brokers = "localhost:9092";
String zookeepers = "localhost:2181";
// input and output topics
String from = "log";
String to = "recommender";
// kafka streams configuration
Properties settings = new Properties();
settings.put(StreamsConfig.APPLICATION_ID_CONFIG, "logFilter");
settings.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
settings.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, zookeepers);
StreamsConfig config = new StreamsConfig(settings);
// topology builder
TopologyBuilder builder = new TopologyBuilder();
// define the stream-processing topology
builder.addSource("SOURCE", from)
.addProcessor("PROCESS", () -> new LogProcessor(), "SOURCE")
.addSink("SINK", to, "PROCESS");
KafkaStreams streams = new KafkaStreams(builder, config);
streams.start();
}
}
package com.atguigu.kafkastream;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;
public class LogProcessor implements Processor<byte[],byte[]> {
private ProcessorContext context;
public void init(ProcessorContext context) {
this.context = context;
}
public void process(byte[] dummy, byte[] line) {
String input = new String(line);
// filter log lines by the prefix and extract the content after it
if(input.contains("PRODUCT_RATING_PREFIX:")){
System.out.println("product rating coming!!!!" + input);
input = input.split("PRODUCT_RATING_PREFIX:")[1].trim();
context.forward("logProcessor".getBytes(), input.getBytes());
}
}
public void punctuate(long timestamp) {
}
public void close() {
}
}
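# Flume agent configuration (log-kafka.properties): tail the application log and sink lines containing PRODUCT_RATING_PREFIX to the Kafka topic "log"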
agent.sources = exectail
agent.channels = memoryChannel
agent.sinks = kafkasink
# For each one of the sources, the type is defined
agent.sources.exectail.type = exec
# The path below is the absolute path of the log file to collect; change it to your own log directory
agent.sources.exectail.command = tail -f /mnt/d/Projects/BigData/ECommerceRecommenderSystem/businessServer/src/main/log/agent.log
agent.sources.exectail.interceptors=i1
agent.sources.exectail.interceptors.i1.type=regex_filter
# regex matching the log prefix to keep
agent.sources.exectail.interceptors.i1.regex=.+PRODUCT_RATING_PREFIX.+
# The channel can be defined as follows.
agent.sources.exectail.channels = memoryChannel
# Each sink's type must be defined
agent.sinks.kafkasink.type = org.apache.flume.sink.kafka.KafkaSink
agent.sinks.kafkasink.kafka.topic = log
agent.sinks.kafkasink.kafka.bootstrap.servers = localhost:9092
agent.sinks.kafkasink.kafka.producer.acks = 1
agent.sinks.kafkasink.kafka.flumeBatchSize = 20
#Specify the channel the sink should use
agent.sinks.kafkasink.channel = memoryChannel
# Each channel's type is defined.
agent.channels.memoryChannel.type = memory
# Other config values specific to each type of channel(sink or source)
# can be defined as well
# In this case, it specifies the capacity of the memory channel
agent.channels.memoryChannel.capacity = 10000
After the configuration is in place, start flume:
./bin/flume-ng agent -c ./conf/ -f ./conf/log-kafka.properties -n agent -Dflume.root.logger=INFO,console
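Core code of the content-based recommendation service: TF-IDF feature vectors are built from the product tags, and the resulting DoubleMatrix features can then be compared pairwise, just like the ALS product features above.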
// load the product dataset
val productTagsDF = spark
.read
.option("uri",mongoConfig.uri)
.option("collection",MONGODB_PRODUCT_COLLECTION)
.format("com.mongodb.spark.sql")
.load()
.as[Product]
.map(x => (x.productId, x.name, x.tags.map(c => if(c == '|') ' ' else c)))
.toDF("productId", "name", "tags").cache()
// instantiate a tokenizer; by default it splits on whitespace
val tokenizer = new Tokenizer().setInputCol("tags").setOutputCol("words")
// apply the tokenizer
val wordsData = tokenizer.transform(productTagsDF)
// define a HashingTF transformer
val hashingTF = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(200)
// apply HashingTF
val featurizedData = hashingTF.transform(wordsData)
// define an IDF estimator
val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
// fit the IDF model on the term-frequency data
val idfModel = idf.fit(featurizedData)
// apply TF-IDF to get the new feature matrix
val rescaledData = idfModel.transform(featurizedData)
// extract the feature vectors from rescaledData
val productFeatures = rescaledData.map{
case row => ( row.getAs[Int]("productId"),row.getAs[SparseVector]("features").toArray )
}
.rdd
.map(x => {
(x._1, new DoubleMatrix(x._2) )
})
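Core code for computing product similarity from rating co-occurrence (item-CF style):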
val ratingDF = spark.read
.option("uri", mongoConfig.uri)
.option("collection", MONGODB_RATING_COLLECTION)
.format("com.mongodb.spark.sql")
.load()
.as[Rating]
.map(x=> (x.userId, x.productId, x.score) )
.toDF("userId", "productId", "rating")
// count the number of ratings per product and add it to ratingDF via an inner join
val numRatersPerProduct = ratingDF.groupBy("productId").count()
val ratingWithCountDF = ratingDF.join(numRatersPerProduct, "productId")
// self-join the ratings on userId to pair up products, so we can count how many times two products were rated by the same user
val joinedDF = ratingWithCountDF.join(ratingWithCountDF, "userId")
.toDF("userId", "product1", "rating1", "count1", "product2", "rating2", "count2")
.select("userId", "product1", "count1", "product2", "count2")
joinedDF.createOrReplaceTempView("joined")
val cooccurrenceDF = spark.sql(
"""
|select product1
|, product2
|, count(userId) as coocount
|, first(count1) as count1
|, first(count2) as count2
|from joined
|group by product1, product2
""".stripMargin
).cache()
val simDF = cooccurrenceDF.map{ row =>
// compute the co-occurrence similarity from the co-occurrence count and each product's own count
val coocSim = cooccurrenceSim( row.getAs[Long]("coocount"), row.getAs[Long]("count1"), row.getAs[Long]("count2") )
( row.getAs[Int]("product1"), ( row.getAs[Int]("product2"), coocSim ) )
}
.rdd
.groupByKey()
.map{
case (productId, recs) =>
ProductRecs( productId,
recs.toList
.filter(x=>x._1 != productId)
.sortWith(_._2>_._2)
.map(x=>Recommendation(x._1,x._2))
.take(MAX_RECOMMENDATION)
)
}
.toDF()
def cooccurrenceSim(cooCount: Long, count1: Long, count2: Long): Double ={
cooCount / math.sqrt( count1 * count2 )
}
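For intuition, cooccurrenceSim is just the co-rating count normalized by the geometric mean of the two products' rating counts; a quick check with made-up numbers (not project data):
cooccurrenceSim(10, 100, 50)   // 10 / sqrt(5000) ≈ 0.14
cooccurrenceSim(50, 100, 50)   // 50 / sqrt(5000) ≈ 0.71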
java -cp kafkastream.jar com.atguigu.kafkastream.Application linux:9092 linux:2181 log recommender
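# Flume configuration for the deployed environment: tail catalina.out on the business server and sink rating lines to Kafka on host linux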
agent.sources = exectail
agent.channels = memoryChannel
agent.sinks = kafkasink
# For each one of the sources, the type is defined
agent.sources.exectail.type = exec
agent.sources.exectail.command = tail -f /home/bigdata/cluster/apache-tomcat-8.5.23/logs/catalina.out
agent.sources.exectail.interceptors=i1
agent.sources.exectail.interceptors.i1.type=regex_filter
agent.sources.exectail.interceptors.i1.regex=.+PRODUCT_RATING_PREFIX.+
# The channel can be defined as follows.
agent.sources.exectail.channels = memoryChannel
# Each sink's type must be defined
agent.sinks.kafkasink.type = org.apache.flume.sink.kafka.KafkaSink
agent.sinks.kafkasink.kafka.topic = log
agent.sinks.kafkasink.kafka.bootstrap.servers = linux:9092
agent.sinks.kafkasink.kafka.producer.acks = 1
agent.sinks.kafkasink.kafka.flumeBatchSize = 20
#Specify the channel the sink should use
agent.sinks.kafkasink.channel = memoryChannel
# Each channel's type is defined.
agent.channels.memoryChannel.type = memory
# Other config values specific to each type of channel(sink or source)
# can be defined as well
# In this case, it specifies the capacity of the memory channel
agent.channels.memoryChannel.capacity = 10000
bin/flume-ng agent -c ./conf/ -f ./conf/log-kafka.properties -n agent
bin/spark-submit --class com.atguigu.streamingRecommender.StreamingRecommender streamingRecommender-1.0-SNAPSHOT.jar
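Job definitions for a scheduler such as Azkaban (type=command), one for the offline service and one for the statistics service: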
type=command
command=/home/bigdata/cluster/spark-2.1.1-bin-hadoop2.7/bin/spark-submit --class com.atguigu.offline.RecommenderTrainerApp
offlineRecommender-1.0-SNAPSHOT.jar
type=command
command=/home/bigdata/cluster/spark-2.1.1-bin-hadoop2.7/bin/spark-submit --class com.atguigu.statisticsRecommender.StatisticsApp
statisticsRecommender-1.0-SNAPSHOT.jar