007 Dataset

scala> // Define the case class to be used in conjunction with DataFrames and Datasets

scala> case class Trans(accNo: String, tranAmount: Double)
defined class Trans

scala> // Create the list of case class objects from which the Dataset will be built

scala> val acTransList = Seq(Trans("SB10001", 1000), Trans("SB10002",1200), Trans("SB10003", 8000), Trans("SB10004",400), Trans("SB10005",300),Trans("SB10006",10000), Trans("SB10007",500), Trans("SB10008",56),Trans("SB10009",30),Trans("SB10010",7000), Trans("CR10001",7000),Trans("SB10002",-10))
acTransList: Seq[Trans] = List(Trans(SB10001,1000.0), Trans(SB10002,1200.0), Trans(SB10003,8000.0), Trans(SB10004,400.0), Trans(SB10005,300.0), Trans(SB10006,10000.0), Trans(SB10007,500.0), Trans(SB10008,56.0), Trans(SB10009,30.0), Trans(SB10010,7000.0), Trans(CR10001,7000.0), Trans(SB10002,-10.0))

scala> // Create the Dataset

scala> val acTransDS = acTransList.toDS()
acTransDS: org.apache.spark.sql.Dataset[Trans] = [accNo: string, tranAmount: double]

scala> acTransDS.show()
+-------+----------+
|  accNo|tranAmount|
+-------+----------+
|SB10001|    1000.0|
|SB10002|    1200.0|
|SB10003|    8000.0|
|SB10004|     400.0|
|SB10005|     300.0|
|SB10006|   10000.0|
|SB10007|     500.0|
|SB10008|      56.0|
|SB10009|      30.0|
|SB10010|    7000.0|
|CR10001|    7000.0|
|SB10002|     -10.0|
+-------+----------+
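
The toDS() method is available here because the Spark shell imports the SparkSession implicits automatically. A minimal sketch of the same Dataset creation in a standalone application, assuming illustrative object and app names:

import org.apache.spark.sql.SparkSession

object TransApp {
  // Defined inside an object (not a method) so Spark can derive an encoder
  case class Trans(accNo: String, tranAmount: Double)

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("TransApp")
      .master("local[*]") // for local testing; omit when submitting to a cluster
      .getOrCreate()
    import spark.implicits._ // brings toDS() and toDF() into scope

    val acTransDS = Seq(Trans("SB10001", 1000), Trans("CR10001", 7000)).toDS()
    acTransDS.show()
    spark.stop()
  }
}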


scala> // Apply filters and create another Dataset of good transaction records

scala> val goodTransRecords = acTransDS.filter(_.tranAmount > 0).filter(_.accNo.startsWith("SB"))
goodTransRecords: org.apache.spark.sql.Dataset[Trans] = [accNo: string, tranAmount: double]

scala> goodTransRecords.show()
+-------+----------+                                                            
|  accNo|tranAmount|
+-------+----------+
|SB10001|    1000.0|
|SB10002|    1200.0|
|SB10003|    8000.0|
|SB10004|     400.0|
|SB10005|     300.0|
|SB10006|   10000.0|
|SB10007|     500.0|
|SB10008|      56.0|
|SB10009|      30.0|
|SB10010|    7000.0|
+-------+----------+
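
The two chained filters above can equally be written as one typed predicate, scanning the Dataset only once; a sketch (the value name is illustrative):

val goodTransRecords2 = acTransDS.filter(t => t.tranAmount > 0 && t.accNo.startsWith("SB"))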


scala> // Apply a filter and create another Dataset of high-value transaction records

scala> val highValueTransRecords = goodTransRecords.filter(_.tranAmount > 1000)
highValueTransRecords: org.apache.spark.sql.Dataset[Trans] = [accNo: string, tranAmount: double]

scala> highValueTransRecords.show()
+-------+----------+
|  accNo|tranAmount|
+-------+----------+
|SB10002|    1200.0|
|SB10003|    8000.0|
|SB10006|   10000.0|
|SB10010|    7000.0|
+-------+----------+


scala> // The function that identifies the bad amounts

scala> val badAmountLambda = (trans: Trans) => trans.tranAmount <= 0
badAmountLambda: Trans => Boolean = <function1>

scala> // The function that identifies bad accounts

scala> val badAcNoLambda = (trans: Trans) => !trans.accNo.startsWith("SB")
badAcNoLambda: Trans => Boolean = <function1>

scala> // Apply filter and create another Dataset of bad amount records

scala> val badAmountRecords = acTransDS.filter(badAmountLambda)
badAmountRecords: org.apache.spark.sql.Dataset[Trans] = [accNo: string, tranAmount: double]

scala> badAmountRecords.show()
+-------+----------+
|  accNo|tranAmount|
+-------+----------+
|SB10002|     -10.0|
+-------+----------+


scala> // Apply filter and create another Dataset of bad account records

scala> val badAccountRecords = acTransDS.filter(badAcNoLambda)
badAccountRecords: org.apache.spark.sql.Dataset[Trans] = [accNo: string, tranAmount: double]

scala> badAccountRecords.show()
+-------+----------+
|  accNo|tranAmount|
+-------+----------+
|CR10001|    7000.0|
+-------+----------+


scala> // Union the two Datasets to create another Dataset

scala> val badTransRecords = badAmountRecords.union(badAccountRecords)
badTransRecords: org.apache.spark.sql.Dataset[Trans] = [accNo: string, tranAmount: double]

scala> badTransRecords.show()
+-------+----------+
|  accNo|tranAmount|
+-------+----------+
|SB10002|     -10.0|
|CR10001|    7000.0|
+-------+----------+
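
Note that union() follows SQL UNION ALL semantics and keeps duplicate rows; chain .distinct() afterwards if duplicates must be removed. The same bad records can also be collected in a single pass by combining the two lambdas; a sketch (the value names are illustrative):

// Combine the predicates so the source Dataset is scanned only once
val badTransLambda = (trans: Trans) => badAmountLambda(trans) || badAcNoLambda(trans)
val badTransRecords2 = acTransDS.filter(badTransLambda)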


scala> // Calculate the sum

scala> val sumAmount = goodTransRecords.map(trans => trans.tranAmount).reduce(_ + _)
sumAmount: Double = 28486.0

scala> // Calculate the maximum

scala> val maxAmount = goodTransRecords.map(trans => trans.tranAmount).reduce((a, b) => if (a > b) a else b)
maxAmount: Double = 10000.0

scala> // Calculate the minimum

scala> val minAmount = goodTransRecords.map(trans => trans.tranAmount).reduce((a, b) => if (a < b) a else b)
minAmount: Double = 30.0
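
The map/reduce pairs above run as typed operations. The same figures can also be computed with the built-in aggregate functions, which return a one-row DataFrame rather than plain Double values; a sketch:

import org.apache.spark.sql.functions.{sum, max, min}

// sum, max and min over tranAmount in a single aggregation pass
goodTransRecords.agg(sum("tranAmount"), max("tranAmount"), min("tranAmount")).show()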

scala> // Convert the Dataset to a DataFrame

scala> val acTransDF = acTransDS.toDF()
acTransDF: org.apache.spark.sql.DataFrame = [accNo: string, tranAmount: double]

scala> acTransDF.show()
+-------+----------+
|  accNo|tranAmount|
+-------+----------+
|SB10001|    1000.0|
|SB10002|    1200.0|
|SB10003|    8000.0|
|SB10004|     400.0|
|SB10005|     300.0|
|SB10006|   10000.0|
|SB10007|     500.0|
|SB10008|      56.0|
|SB10009|      30.0|
|SB10010|    7000.0|
|CR10001|    7000.0|
|SB10002|     -10.0|
+-------+----------+


scala> // Use Spark SQL to find the invalid transaction records

scala> acTransDF.createOrReplaceTempView("trans")

scala> val invalidTransactions = spark.sql("SELECT accNo, tranAmount FROM trans WHERE (accNo NOT LIKE 'SB%') OR tranAmount <= 0")
19/12/02 22:29:36 WARN ObjectStore: Failed to get database global_temp, returning NoSuchObjectException
invalidTransactions: org.apache.spark.sql.DataFrame = [accNo: string, tranAmount: double]

scala> invalidTransactions.show()
+-------+----------+
|  accNo|tranAmount|
+-------+----------+
|CR10001|    7000.0|
|SB10002|     -10.0|
+-------+----------+
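
The same query can be expressed with the DataFrame API instead of a SQL string; a sketch (the value name is illustrative):

import org.apache.spark.sql.functions.col

// NOT LIKE 'SB%' OR tranAmount <= 0, written as Column predicates
val invalidTransactions2 =
  acTransDF.filter(!col("accNo").startsWith("SB") || col("tranAmount") <= 0)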


scala> // Interoperability of RDD, DataFrame and Dataset

scala> // Create RDD

scala> val acTransRDD = sc.parallelize(acTransList)
acTransRDD: org.apache.spark.rdd.RDD[Trans] = ParallelCollectionRDD[41] at parallelize at <console>:26

scala> // Convert RDD to DataFrame

scala> val acTransRDDtoDF = acTransRDD.toDF()
acTransRDDtoDF: org.apache.spark.sql.DataFrame = [accNo: string, tranAmount: double]

scala> // Convert the DataFrame to a typed Dataset, regaining compile-time type checking

scala> val acTransDFtoDS = acTransRDDtoDF.as[Trans]
acTransDFtoDS: org.apache.spark.sql.Dataset[Trans] = [accNo: string, tranAmount: double]

scala> acTransDFtoDS.show()
+-------+----------+
|  accNo|tranAmount|
+-------+----------+
|SB10001|    1000.0|
|SB10002|    1200.0|
|SB10003|    8000.0|
|SB10004|     400.0|
|SB10005|     300.0|
|SB10006|   10000.0|
|SB10007|     500.0|
|SB10008|      56.0|
|SB10009|      30.0|
|SB10010|    7000.0|
|CR10001|    7000.0|
|SB10002|     -10.0|
+-------+----------+
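
The conversions also work in the other direction: a typed Dataset exposes its underlying RDD, and an RDD of case class objects can go straight to a Dataset; a sketch (the value names are illustrative):

val acTransDStoRDD = acTransDFtoDS.rdd // Dataset[Trans] -> RDD[Trans]
val acTransRDDtoDS = acTransRDD.toDS() // RDD[Trans] -> Dataset[Trans], skipping the DataFrame step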
