Spark SQL Data Sources

A test of the various Spark SQL data sources:
roughly JSON files, Parquet files, other common file formats, JDBC, and so on.
There is also an HBase source (not posted yet; it may take a few more days).
Code:

General workflow:
Step 1, create: build the DataFrame with a SparkSession, typically sparkSession.read.format("format").load("file path")
Step 2: apply whatever transformations you need
Step 3: save the result to a file or another sink, typically dataFrame.write.format("format").save("file path")
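A minimal sketch of these three steps (the paths, format names, and column name below are placeholders, not files from this post):

import org.apache.spark.sql.SparkSession

object ReadTransformWriteSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("sketch").getOrCreate()

    // Step 1: read -- format can be "json", "parquet", "csv", "jdbc", ...
    val df = spark.read.format("json").load("path/to/input.json")

    // Step 2: ordinary DataFrame operations
    val result = df.select("name")

    // Step 3: save, possibly in a different format than the input
    result.write.format("parquet").save("path/to/output.parquet")

    spark.stop()
  }
}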

package sql

import org.apache.spark.sql.SparkSession

object SQLDataSourceExample {

  case class Person(name: String, age: Long)

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("Spark SQL data sources example")
      .config("spark.some.config.option", "some-value")
      .getOrCreate()

    /*runBasicDataSourceExample(spark)
    runBasicParquetExample(spark)*/
    runParquetSchemaMergingExample(spark)
    /* runJsonDatasetExample(spark)
    runJdbcDatasetExample(spark)*/

    spark.stop()
  }

  private def runBasicDataSourceExample(spark: SparkSession): Unit = {
    println("--------------------      runBasicDataSourceExample  start    -----------------")
    // $example on:generic_load_save_functions$
    val usersDF = spark.read.load("spark_sql/src/main/resources/users.parquet")
    println("--------------------parquet-----------------")
    usersDF.printSchema()
    usersDF.show()
    usersDF.select("name", "favorite_color").write.save("spark_sql/src/main/resources/result/namesAndFavColors.parquet1")
    // $example off:generic_load_save_functions$
    // $example on:manual_load_options$
    println("--------------------json-----------------")
    val peopleDF = spark.read.format("json").load("spark_sql/src/main/resources/people.json")
    peopleDF.show()
    peopleDF.select("name", "age").write.format("parquet").save("spark_sql/src/main/resources/result/namesAndAges.parquet1")
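    // Other common file formats follow the same pattern; a hedged sketch of a CSV
    // read with per-format options ("people.csv" is a hypothetical file, not one
    // shipped with this post):
    // val csvDF = spark.read.format("csv")
    //   .option("sep", ";")
    //   .option("inferSchema", "true")
    //   .option("header", "true")
    //   .load("spark_sql/src/main/resources/people.csv")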

    println("--------------------直接使用sql-----------------")
    val sqlDF = spark.sql("SELECT * FROM parquet.`spark_sql/src/main/resources/users.parquet`")
    sqlDF.show()
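    // save() throws if the target path already exists; a save mode makes re-runs
    // idempotent. A minimal sketch ("users_copy.parquet" is a hypothetical output
    // path; the standard modes are append / overwrite / ignore / error):
    sqlDF.write.mode("overwrite").parquet("spark_sql/src/main/resources/result/users_copy.parquet")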

    println("--------------------      runBasicDataSourceExample  end    -----------------")
  }

  private def runBasicParquetExample(spark: SparkSession): Unit = {
    println("--------------------      runBasicParquetExample  start    -----------------")
    import spark.implicits._

    val peopleDF = spark.read.json("spark_sql/src/main/resources/people.json")

    peopleDF.write.parquet("spark_sql/src/main/resources/result/people.parquet")

    val parquetFileDF = spark.read.parquet("spark_sql/src/main/resources/result/people.parquet")

    println("-------创建临时表进行sql-------")
    parquetFileDF.createOrReplaceTempView("parquetFile")
    val namesDF = spark.sql("SELECT name FROM parquetFile WHERE age BETWEEN 13 AND 19")
    namesDF.map(attributes => "Name: " + attributes(0)).show()

    println("--------------------      runBasicParquetExample  end    -----------------")
  }

  private def runParquetSchemaMergingExample(spark: SparkSession): Unit = {
    println("--------------------      runParquetSchemaMergingExample  start    -----------------")
    import spark.implicits._
println("---------创建一个普通的dataframe,然后保存为一个square文件------")
    // Create a simple DataFrame, store into a partition directory
    val squaresDF = spark.sparkContext.makeRDD(1 to 5).map(i => (i, i * i)).toDF("value", "square")
    squaresDF.write.parquet("spark_sql/src/main/resources/result/data/test_table/key=1")

    // Create another DataFrame in a new partition directory,
    // adding a new column and dropping an existing column
    val cubesDF = spark.sparkContext.makeRDD(6 to 10).map(i => (i, i * i * i)).toDF("value", "cube")
    cubesDF.write.parquet("spark_sql/src/main/resources/result/data/test_table/key=2")

    // Read the partitioned table
    val mergedDF = spark.read.option("mergeSchema", "true").parquet("spark_sql/src/main/resources/result/data/test_table")
    mergedDF.printSchema()

    // The final schema consists of all 3 columns in the Parquet files together
    // with the partitioning column appeared in the partition directory paths
    // root
    //  |-- value: int (nullable = true)
    //  |-- square: int (nullable = true)
    //  |-- cube: int (nullable = true)
    //  |-- key: int (nullable = true)
    // $example off:schema_merging$
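    // Alternatively, schema merging can be enabled globally for all subsequent
    // Parquet reads via the standard session option (equivalent to the per-read
    // option("mergeSchema", "true") used above):
    // spark.conf.set("spark.sql.parquet.mergeSchema", "true")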
    println("--------------------      runParquetSchemaMergingExample  end    -----------------")
  }

  private def runJsonDatasetExample(spark: SparkSession): Unit = {
    // $example on:json_dataset$
    // A JSON dataset is pointed to by path.
    // The path can be either a single text file or a directory storing text files
    val path = "spark_sql/src/main/resources/people.json"
    val peopleDF = spark.read.json(path)

    // The inferred schema can be visualized using the printSchema() method
    peopleDF.printSchema()
    // root
    //  |-- age: long (nullable = true)
    //  |-- name: string (nullable = true)

    // Creates a temporary view using the DataFrame
    peopleDF.createOrReplaceTempView("people")

    // SQL statements can be run by using the sql methods provided by spark
    val teenagerNamesDF = spark.sql("SELECT name FROM people WHERE age BETWEEN 13 AND 19")
    teenagerNamesDF.show()
    // +------+
    // |  name|
    // +------+
    // |Justin|
    // +------+

    // Alternatively, a DataFrame can be created for a JSON dataset represented by
    // an RDD[String] storing one JSON object per string
    val otherPeopleRDD = spark.sparkContext.makeRDD(
      """{"name":"Yin","address":{"city":"Columbus","state":"Ohio"}}""" :: Nil)
    val otherPeople = spark.read.json(otherPeopleRDD)
    otherPeople.show()
    // +---------------+----+
    // |        address|name|
    // +---------------+----+
    // |[Columbus,Ohio]| Yin|
    // +---------------+----+
    // $example off:json_dataset$
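    // Note: since Spark 2.2 the RDD[String] overload of read.json is deprecated;
    // the preferred variant takes a Dataset[String]. A minimal sketch of the same read:
    import spark.implicits._
    val otherPeopleDS = Seq(
      """{"name":"Yin","address":{"city":"Columbus","state":"Ohio"}}""").toDS()
    spark.read.json(otherPeopleDS).show()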
  }

  private def runJdbcDatasetExample(spark: SparkSession): Unit = {

    val jdbcDF = spark.read
      .format("jdbc")
      .option("url", "jdbc:postgresql:dbserver")
      .option("dbtable", "schema.tablename")
      .option("user", "username")
      .option("password", "password")
      .load()
    // $example off:jdbc_dataset$
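    // Writing back over JDBC works symmetrically; a hedged sketch (the target
    // table "schema.othertable" is a placeholder, and the PostgreSQL driver must
    // be on the classpath):
    jdbcDF.write
      .format("jdbc")
      .option("url", "jdbc:postgresql:dbserver")
      .option("dbtable", "schema.othertable")
      .option("user", "username")
      .option("password", "password")
      .save()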
  }
}

HBase as a source: to be added later.
