Reading Elasticsearch with Spark

No preamble, here is the code first:

    import org.apache.spark.SparkConf
    import org.apache.spark.sql.SparkSession
    import org.elasticsearch.spark.rdd.EsSpark

    /**
     * Initialize Spark.
     */
    val sparkName = "Read_ES"
    val sparkConf = new SparkConf().setAppName(sparkName)
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.kryoserializer.buffer.max", "512m") // "spark.kryoserializer.buffer.max.mb" is deprecated
    setEsConf(sparkConf) // the Java helper shown below
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    val name = "小明"
    val query =
      s"""
         |{
         |  "query": {
         |    "match": {
         |      "name": "$name"
         |    }
         |  }
         |}
         |""".stripMargin
    // Any query that is valid Elasticsearch query DSL works here.
    val index = "your_es_index"
    val esRdd = EsSpark.esRDD(spark.sparkContext, index, query)
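
Note that esRDD returns an RDD[(String, Map[String, AnyRef])] of (document id, source map) pairs, not a DataFrame. A minimal sketch of consuming it, assuming the "name" field from the query above:

    // Each element is (documentId, sourceFieldsMap).
    esRdd.take(10).foreach { case (id, source) =>
      println(s"id=$id, name=${source.get("name").orNull}")
    }

The setEsConf helper called during initialization is the following Java method: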
    import org.apache.spark.SparkConf;
    import org.elasticsearch.hadoop.cfg.ConfigurationOptions;

    public static void setEsConf(SparkConf sparkConf) {
        /**
         * ES_NODES does not need to list every node. Without es.nodes.wan.only the
         * rest of the cluster (master or data nodes) is discovered automatically;
         * with es.nodes.wan.only=true (set below) discovery is disabled and all
         * requests go through the nodes listed here, so one reachable node suffices.
         */
        sparkConf.set(ConfigurationOptions.ES_NODES, "es-node-host");
        sparkConf.set(ConfigurationOptions.ES_PORT, "8080"); // ES HTTP port (9200 by default)
        sparkConf.set(ConfigurationOptions.ES_NET_HTTP_AUTH_USER, "user");     // only if authentication is enabled
        sparkConf.set(ConfigurationOptions.ES_NET_HTTP_AUTH_PASS, "password"); // only if authentication is enabled
        sparkConf.set(ConfigurationOptions.ES_INDEX_AUTO_CREATE, "true");
        sparkConf.set(ConfigurationOptions.ES_BATCH_WRITE_REFRESH, "false");
        sparkConf.set(ConfigurationOptions.ES_BATCH_SIZE_BYTES, "5mb");
        sparkConf.set(ConfigurationOptions.ES_BATCH_SIZE_ENTRIES, "500");
        sparkConf.set(ConfigurationOptions.ES_BATCH_WRITE_RETRY_POLICY, "simple");
        sparkConf.set(ConfigurationOptions.ES_BATCH_WRITE_RETRY_COUNT, "10");
        sparkConf.set(ConfigurationOptions.ES_BATCH_WRITE_RETRY_WAIT, "10s");
        sparkConf.set(ConfigurationOptions.ES_NODES_WAN_ONLY, "true");
    }
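
The RDD API is not the only entry point: elasticsearch-hadoop also registers a Spark SQL data source, so the same read can be expressed as a DataFrame. A minimal sketch, assuming the same index, query, and configuration as above:

    import org.apache.spark.sql.DataFrame

    val esDf: DataFrame = spark.read
      .format("org.elasticsearch.spark.sql") // the connector's data source; "es" is a shorthand alias
      .option("es.query", query)             // optional: omit to read the whole index
      .load(index)
    esDf.show()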

The query just has to be valid Elasticsearch query DSL. The code above uses the following dependency:


    <dependency>
      <groupId>org.elasticsearch</groupId>
      <artifactId>elasticsearch-hadoop</artifactId>
      <version>xxx</version>
    </dependency>
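
If you build with sbt instead of Maven, the equivalent looks like the line below. This is illustrative only: for Spark you would normally pick the elasticsearch-spark artifact matching your Spark and Scala versions (e.g. elasticsearch-spark-30_2.12) rather than the umbrella elasticsearch-hadoop jar, and fill in a real version.

    // build.sbt — illustrative coordinates; match the artifact to your Spark/Scala version.
    libraryDependencies += "org.elasticsearch" % "elasticsearch-spark-30_2.12" % "x.y.z"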
