Tonight's class covered:
1. A review of the map, filter, and flatMap operations
2. reduceByKey and groupByKey
3. join and cogroup
join and cogroup are operations that every Spark learner must master.
The single most important operator in big data is join!
Below is today's project example together with the homework (write a cogroup example in Scala).
package com.dt.spark.cores

import org.apache.spark.{SparkContext, SparkConf}

/**
 * Created by chenjh on 2016/1/22.
 */
object Transformations {

  // Every piece of functionality invoked from the main method is wrapped in its own function
  def main(args: Array[String]) {
    val sc = sparkContext("Transformation Operations")
    //mapTransformation(sc)
    //filterTransformation(sc)
    //flatMapTransformation(sc)
    //groupByKeyTransformation(sc)
    //reduceByKeyTransformation(sc)
    //joinTransformation(sc)
    // Homework
    cogroupTransformation(sc)
    // Stop the SparkContext, destroy the Driver-side objects and release the resources
    sc.stop()
  }

  def sparkContext(name: String) = {
    // Create the SparkConf, which initializes the application's configuration
    val conf = new SparkConf().setAppName(name).setMaster("local")
    // Create the SparkContext: the single entry point for creating the first RDD, the soul of the Driver and the only channel to the cluster
    val sc = new SparkContext(conf)
    sc
  }
  def mapTransformation(sc: SparkContext) {
    val nums = sc.parallelize(1 to 10)
    // map works on elements of any type: it traverses every element of the collection it acts on
    // and applies the function passed as its parameter to each element
    val mapped = nums.map(item => 2 * item)
    // Collect the results and print them with foreach
    mapped.collect().foreach(println)
  }
  def filterTransformation(sc: SparkContext): Unit = {
    // Create an RDD from a local collection
    val nums = sc.parallelize(1 to 20)
    // filter keeps the elements for which the Boolean function passed as its parameter returns true,
    // and builds a new MapPartitionsRDD from them
    val filtered = nums.filter(item => item % 2 == 0)
    // Collect the results and print them with foreach
    filtered.collect().foreach(println)
  }
  def flatMapTransformation(sc: SparkContext) {
    // Create an Array of Strings
    val bigData = Array("Scala Spark", "Java Hadoop", "Java Tachyon")
    // Create a ParallelCollectionRDD whose elements are Strings
    val bigDataString = sc.parallelize(bigData)
    // The function passed as the parameter splits each String of the RDD into words,
    // and flatMap flattens the results into a single collection: {Scala, Spark, Java, Hadoop, Java, Tachyon}
    val words = bigDataString.flatMap(line => line.split(" "))
    words.collect().foreach(println)
  }
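  // Expected output of flatMapTransformation: one word per line,
  // i.e. Scala, Spark, Java, Hadoop, Java, Tachyon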
  def groupByKeyTransformation(sc: SparkContext) {
    // Prepare the data
    val data = Array(
      Tuple2(100, "Spark"),
      Tuple2(100, "Tachyon"),
      Tuple2(70, "Hadoop"),
      Tuple2(80, "Kafka"),
      Tuple2(70, "HBase"))
    // Create the RDD
    val dataRdd = sc.parallelize(data)
    // Group the values that share the same key; after grouping, the value for each key is a collection
    val grouped = dataRdd.groupByKey()
    // Collect the results and print them with foreach
    grouped.collect().foreach(println)
  }
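  // Expected output of groupByKeyTransformation (key order may vary):
  // (100,CompactBuffer(Spark, Tachyon))
  // (80,CompactBuffer(Kafka))
  // (70,CompactBuffer(Hadoop, HBase))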
  def reduceByKeyTransformation(sc: SparkContext) {
    // Read the text file with a parallelism (number of partitions) of 1
    val lines = sc.textFile("C://SharedFolder//TextLines.txt", 1)
    // Split each line into words
    val words = lines.flatMap { line => line.split(" ") }
    // Map each word to the pair (word, 1)
    val pairs = words.map { word => (word, 1) }
    // Sum the counts for each key
    val wordCounts = pairs.reduceByKey(_ + _)
    wordCounts.collect.foreach(wordNumberPair => println(wordNumberPair._1 + " : " + wordNumberPair._2))
  }
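  // For comparison, a minimal sketch of the same word count written with groupByKey.
  // reduceByKey is usually preferable here: it combines the counts on the map side before shuffling,
  // whereas groupByKey shuffles every (word, 1) pair and only sums them afterwards.
  def groupByKeyWordCount(sc: SparkContext) {
    val lines = sc.textFile("C://SharedFolder//TextLines.txt", 1)
    val pairs = lines.flatMap(_.split(" ")).map(word => (word, 1))
    // groupByKey collects all the 1s of each word into a collection, which mapValues then sums
    val wordCounts = pairs.groupByKey().mapValues(_.sum)
    wordCounts.collect.foreach(pair => println(pair._1 + " : " + pair._2))
  }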
  def joinTransformation(sc: SparkContext) {
    // Student id -> name pairs
    val studentNames = Array(
      Tuple2(1, "Spark"),
      Tuple2(2, "Tachyon"),
      Tuple2(3, "Hadoop")
    )
    // Student id -> score pairs
    val studentScores = Array(
      Tuple2(1, 100),
      Tuple2(2, 95),
      Tuple2(3, 70)
    )
    val names = sc.parallelize(studentNames)
    val scores = sc.parallelize(studentScores)
    // join pairs up the values of the two RDDs that share the same key
    val studentNameAndScore = names.join(scores)
    studentNameAndScore.collect.foreach(println)
  }
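  // Expected output of joinTransformation (key order may vary):
  // (1,(Spark,100))
  // (2,(Tachyon,95))
  // (3,(Hadoop,70))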
  def cogroupTransformation(sc: SparkContext): Unit = {
    // Student id -> name pairs
    val studentNames = Array(
      Tuple2(1, "Spark"),
      Tuple2(2, "Tachyon"),
      Tuple2(3, "Hadoop")
    )
    // Student id -> score pairs; students 1 and 3 have more than one score
    val studentScores = Array(
      Tuple2(1, 100),
      Tuple2(2, 95),
      Tuple2(3, 70),
      Tuple2(1, 80),
      Tuple2(3, 89)
    )
    val names = sc.parallelize(studentNames)
    val scores = sc.parallelize(studentScores)
    // cogroup groups the values of both RDDs by key: for each key it yields a tuple of two collections,
    // one with the names and one with all the scores for that key
    val studentNameAndScore = names.cogroup(scores)
    // Print the result
    studentNameAndScore.collect.foreach(println)
  }
}
Printed output:
(1,(CompactBuffer(Spark),CompactBuffer(100, 80)))
(3,(CompactBuffer(Hadoop),CompactBuffer(70, 89)))
(2,(CompactBuffer(Tachyon),CompactBuffer(95)))
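For comparison, if join were called on these same two RDDs instead of cogroup, Spark would emit one record per matching (name, score) pair rather than one record per key, so the output would look roughly like this (ordering may vary):
(1,(Spark,100))
(1,(Spark,80))
(2,(Tachyon,95))
(3,(Hadoop,70))
(3,(Hadoop,89))
This is the practical difference between the two operators: cogroup gathers all of a key's values from both RDDs into two collections, while join pairs them up record by record.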
Teacher Wang's contact information:
Wang Jialin (王家林), China's No. 1 Spark expert
DT大数据梦工厂 (DT Big Data Dream Factory)
Sina Weibo: http://weibo.com.ilovepains/
WeChat official account: DT_Spark
Blog: http://bolg.sina.com.cn/ilovepains
Mobile: 18610086859
QQ: 1740415547
Email: [email protected]