spark-note

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
// Note: `import spark.implicits._` only compiles after a SparkSession named
// `spark` is in scope (see the SparkSession example below), not at file level.



object Run {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("test").setMaster("local")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")
    val sqlContext = new SQLContext(sc)

    /**
      * id      age
      * 1       30
      * 2       29
      * 3       21
      */
    case class Person(id: Int, age: Int)
    val idAgeRDDPerson = sc.parallelize(Array(Person(1, 30), Person(2, 29), Person(3, 21)))

    // Advantage 1: type errors are caught at compile time
    // idAgeRDDPerson.filter(_.age > "") // compile error: Int cannot be compared with String

    // Advantage 2: operate directly on typed Person objects
    idAgeRDDPerson.filter(_.age > 25).collect().foreach(println)
  }
}
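
The same compile-time type safety carries over to the Dataset API in Spark 2.x. A minimal sketch (assumed, not from the original note); the case class must sit outside main so Spark can derive an Encoder for it:

import org.apache.spark.sql.SparkSession

case class Person(id: Int, age: Int)

object DatasetRun {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("test").master("local").getOrCreate()
    import spark.implicits._ // provides .toDS() on local collections

    val idAgeDS = Seq(Person(1, 30), Person(2, 29), Person(3, 21)).toDS()
    // idAgeDS.filter(_.age > "") // still a compile-time error: Int vs String
    idAgeDS.filter(_.age > 25).show()
    spark.stop()
  }
}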


// SparkSession (Spark 2.x) is the unified entry point, replacing SQLContext/HiveContext.
// warehouseLocation must be defined before use; "spark-warehouse" is an assumed local path.
import org.apache.spark.sql.SparkSession

val warehouseLocation = "spark-warehouse"
val spark = SparkSession
  .builder()
  .appName("SparkSessionZipsExample")
  .config("spark.sql.warehouse.dir", warehouseLocation)
  .enableHiveSupport()
  .getOrCreate()
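
Once the session exists, Hive-backed tables can be queried directly through it. A minimal sketch, assuming a Hive table is available; the table name "zips" is hypothetical:

// query a Hive table via the session ("zips" is a hypothetical table name)
val zipsDF = spark.sql("SELECT * FROM zips LIMIT 10")
zipsDF.show()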



-------------------------------------
scala> val numDS = spark.range(5, 100, 5) 
numDS: org.apache.spark.sql.Dataset[Long] = [id: bigint] 
  
scala> numDS.orderBy(desc("id")).show(5) 
+---+ 
| id| 
+---+ 
| 95| 
| 90| 
| 85| 
| 80| 
| 75| 
+---+ 
only showing top 5 rows 
  
scala> numDS.describe().show() 
+-------+------------------+ 
|summary|                id| 
+-------+------------------+ 
|  count|                19| 
|   mean|              50.0| 
| stddev|28.136571693556885| 
|    min|                 5| 
|    max|                95| 
+-------+------------------+ 
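
describe() is shorthand for the usual summary aggregates. A sketch (not from the transcript) of the equivalent explicit aggregation over the same numDS; in a standalone program the functions import below is required, while spark-shell provides it automatically:

import org.apache.spark.sql.functions._
numDS.agg(count("id"), avg("id"), stddev("id"), min("id"), max("id")).show()
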
scala> val langPercentDF = spark.createDataFrame(List(("Scala", 35), 
     | ("Python", 30), ("R", 15), ("Java", 20))) 
langPercentDF: org.apache.spark.sql.DataFrame = [_1: string, _2: int] 
  
scala> val lpDF = langPercentDF.withColumnRenamed("_1", "language").withColumnRenamed("_2", "percent") 
lpDF: org.apache.spark.sql.DataFrame = [language: string, percent: int] 
  
scala> lpDF.orderBy(desc("percent")).show(false) 
+--------+-------+
|language|percent|
+--------+-------+
|Scala   |35     |
|Python  |30     |
|Java    |20     |
|R       |15     |
+--------+-------+
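
The two withColumnRenamed calls can be avoided by naming the columns up front with toDF. A sketch (assumed), requiring the imports shown:

import spark.implicits._
import org.apache.spark.sql.functions.desc

val lpDF2 = List(("Scala", 35), ("Python", 30), ("R", 15), ("Java", 20))
  .toDF("language", "percent")
lpDF2.orderBy(desc("percent")).show(false)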


Hadoop web UI: http://192.168.1.101:8088/
Spark web UI: http://192.168.1.101:8082/
