Using Spark to Sync HDFS Data Across Clusters

The listing below shows two ways to point a single SparkContext at two HDFS clusters: hdfsFileCopy1 loads each cluster's core-site.xml and hdfs-site.xml via addResource, while hdfsFileCopy2 sets the HA nameservice properties directly on sc.hadoopConfiguration.

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object TestFileCopy {

  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    val conf: SparkConf = new SparkConf()
      .setIfMissing("spark.master", "local[4]")
      .setAppName("Test File Copy App")

    val spark: SparkSession = SparkSession.builder.config(conf).getOrCreate()

    // Get the SparkContext from the SparkSession
    val sc: SparkContext = spark.sparkContext
//    hdfsFileCopy1(sc)
    hdfsFileCopy2(sc)
    sc.stop()
  }

  def hdfsFileCopy1(sc: SparkContext): Unit = {
    // Point the Hadoop configuration at cluster1 before reading the input
    sc.hadoopConfiguration.addResource("cluster1/core-site.xml")
    sc.hadoopConfiguration.addResource("cluster1/hdfs-site.xml")

    val sourceDataPath = "hdfs://cluster1/tmp/"
    val source: RDD[String] = sc.textFile(sourceDataPath + "aaa.txt")

    // Action: print the source contents
    source.foreach(println)

    // Then switch the Hadoop configuration to cluster2
    sc.hadoopConfiguration.addResource("cluster2/core-site.xml")
    sc.hadoopConfiguration.addResource("cluster2/hdfs-site.xml")
    val targetDataPath = "hdfs://cluster2/tmp/hdb/"
    source.saveAsTextFile(targetDataPath)
  }

  def hdfsFileCopy2(sc: SparkContext): Unit = {
    // cluster1: configure the HA nameservice and namenode RPC addresses explicitly
    sc.hadoopConfiguration.set("fs.defaultFS", "hdfs://cluster1")
    sc.hadoopConfiguration.set("dfs.nameservices", "cluster1")
    sc.hadoopConfiguration.set("dfs.ha.namenodes.cluster1", "namenode98,namenode143")
    sc.hadoopConfiguration.set("dfs.namenode.rpc-address.cluster1.namenode98", "cdh-nn-01:8020")
    sc.hadoopConfiguration.set("dfs.namenode.rpc-address.cluster1.namenode143", "cdh-nn-02:8020")
    sc.hadoopConfiguration.set("dfs.client.failover.proxy.provider.cluster1", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")

    val sourceDataPath = "hdfs://cluster1/tmp/"
    val source: RDD[String] = sc.textFile(sourceDataPath + "aaa.txt")
    // Action: print the source contents
    source.foreach(println)

    // cluster2: the same settings for the target cluster's nameservice
    sc.hadoopConfiguration.set("fs.defaultFS", "hdfs://cluster2")
    sc.hadoopConfiguration.set("dfs.nameservices", "cluster2")
    sc.hadoopConfiguration.set("dfs.ha.namenodes.cluster2", "namenode424,namenode417")
    sc.hadoopConfiguration.set("dfs.namenode.rpc-address.cluster2.namenode424", "node-nn-01:8020")
    sc.hadoopConfiguration.set("dfs.namenode.rpc-address.cluster2.namenode417", "node-nn-02:8020")
    sc.hadoopConfiguration.set("dfs.client.failover.proxy.provider.cluster2", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")

    val targetDataPath = "hdfs://cluster2/tmp/hdb/"
    source.saveAsTextFile(targetDataPath)
  }
}
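
Because RDDs are lazy and not cached by default, saveAsTextFile re-reads the source path at write time, so mutating the shared Hadoop configuration between the read and the write can be fragile. A minimal sketch of an alternative, assuming the same nameservice names, namenode IDs, and hostnames as in the listing above, is to register both HA nameservices in one configuration so that the hdfs://cluster1/... and hdfs://cluster2/... paths resolve at the same time:

  // Sketch only: register both HA nameservices on one SparkContext so the
  // source and target paths stay resolvable for the whole job. The namenode
  // IDs and hostnames below mirror the listing above; adjust them to your clusters.
  def hdfsFileCopyBoth(sc: SparkContext): Unit = {
    val conf = sc.hadoopConfiguration
    conf.set("dfs.nameservices", "cluster1,cluster2")

    // cluster1 HA settings
    conf.set("dfs.ha.namenodes.cluster1", "namenode98,namenode143")
    conf.set("dfs.namenode.rpc-address.cluster1.namenode98", "cdh-nn-01:8020")
    conf.set("dfs.namenode.rpc-address.cluster1.namenode143", "cdh-nn-02:8020")
    conf.set("dfs.client.failover.proxy.provider.cluster1",
      "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")

    // cluster2 HA settings
    conf.set("dfs.ha.namenodes.cluster2", "namenode424,namenode417")
    conf.set("dfs.namenode.rpc-address.cluster2.namenode424", "node-nn-01:8020")
    conf.set("dfs.namenode.rpc-address.cluster2.namenode417", "node-nn-02:8020")
    conf.set("dfs.client.failover.proxy.provider.cluster2",
      "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")

    // No config switch is needed between the read and the write:
    // both fully qualified paths resolve against the same configuration.
    val source: RDD[String] = sc.textFile("hdfs://cluster1/tmp/aaa.txt")
    source.saveAsTextFile("hdfs://cluster2/tmp/hdb/")
  }

Note that saveAsTextFile fails if the target directory already exists, so the target path typically needs to be removed first or made unique per run.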
