Case requirements
Data description
The code is as follows:
package base.charpter7
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
/**
* @projectName sparkGNU2023
* @package base.charpter7
* @className base.charpter7.Join
* @description Spark RDD join example
* @author pblh123
* @date 2023/11/28 17:25
* @version 1.0
*
*/
object Join {
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession and SparkContext objects
    if (args.length != 4) {
      println("usage is WordCount <master> <input1> <input2> <output>")
      System.exit(5)
    }
Run result
Requirements and data description: each input line holds two integer columns; sort the lines by the first column and, where the first columns are equal, by the second column (a secondary sort).
Code implementation
SecondarySortKey.scala
package base.charpter7
/**
* @projectName sparkGNU2023
* @package base.charpter7
* @className base.charpter7.SecondarySortKey
* @description Composite key for secondary sorting
* @author pblh123
* @date 2023/11/29 17:01
* @version 1.0
*/
class SecondarySortKey(val first: Int, val second: Int) extends Ordered[SecondarySortKey] with Serializable {
  // Order by the first field; fall back to the second field on ties.
  // Subtraction is fine for the small values used here, though
  // Integer.compare(a, b) would be safer against overflow.
  override def compare(that: SecondarySortKey): Int = {
    if (this.first - that.first != 0) {
      this.first - that.first
    } else {
      this.second - that.second
    }
  }
}
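A quick, self-contained check of the ordering (the sample values are made up):
object SecondarySortKeyDemo {
  def main(args: Array[String]): Unit = {
    val keys = List(
      new SecondarySortKey(3, 5),
      new SecondarySortKey(3, 2),
      new SecondarySortKey(1, 9)
    )
    // Ascending natural order: (1,9), (3,2), (3,5) --
    // the first field wins, the second breaks ties
    keys.sorted.foreach(k => println(s"${k.first} ${k.second}"))
  }
}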
SecondarySortApp.scala
package base.charpter7
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
/**
* @projectName sparkGNU2023
* @package base.charpter7
* @className base.charpter7.SecondarySortApp
* @description Secondary sort driver
* @author pblh123
* @date 2023/11/29 17:04
* @version 1.0
*
*/
object SecondarySortApp {
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession and SparkContext objects
    if (args.length != 2) {
      println("Two arguments are required: <master> <input file>")
      System.exit(5)
    }
    val musrl: String = args(0) // master URL
    val spark: SparkSession = new SparkSession.Builder()
      .appName(s"${this.getClass.getSimpleName}")
      .master(musrl)
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    // 2. Main body
    // Read the input text file
    val inputfile: String = args(1)
    val lines: RDD[String] = sc.textFile(inputfile, 1)
    // Build (SecondarySortKey, line) pairs for the secondary sort
    val pairRDDwithSort: RDD[(SecondarySortKey, String)] = lines.map(line => {
      val strings: Array[String] = line.split(" ")
      (new SecondarySortKey(strings(0).toInt, strings(1).toInt), line)
    })
    // false = sort the composite keys in descending order
    val pairRDDwithSort2: RDD[(SecondarySortKey, String)] = pairRDDwithSort.sortByKey(false)
    // Keep only the original line, now in sorted order
    val sortedRes: RDD[String] = pairRDDwithSort2.map(sortedline => sortedline._2)
    sortedRes.collect().foreach(println)
    // 3. Stop the SparkContext and SparkSession
    sc.stop()
    spark.stop()
  }
}
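Since compare orders keys ascending by the first field (tie-broken by the second) and sortByKey(false) reverses that order, the output is descending on both fields. For a hypothetical input file containing:
5 3
1 6
4 9
8 3
4 7
5 6
3 2
the program prints:
8 3
5 6
5 3
4 9
4 7
3 2
1 6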
Run configuration
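The program expects the master URL and the input file as its two arguments. In an IDE run configuration they could be set, for example, to (the input path is an assumption):
local[*] datas/sort.txt
Equivalently, when launching with spark-submit (the jar name is also an assumption):
spark-submit --class base.charpter7.SecondarySortApp sparkGNU2023.jar local[*] datas/sort.txt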
Run result