Example: Spark SQL Custom Functions (UDF/UDAF)

Contents

    • UDF functions
    • UDAF functions
        • Weakly typed user-defined aggregate function
        • Strongly typed user-defined aggregate function

UDF Functions

scala> val df=spark.read.json("people.json")
df: org.apache.spark.sql.DataFrame = [age: bigint, name: string]

scala> df.show
+---+------+
|age|  name|
+---+------+
| 30|  Andy|
| 19|Justin|
+---+------+

scala> spark.udf.register("addName",(x:String)=>"Name:"+x)
res50: org.apache.spark.sql.expressions.UserDefinedFunction = UserDefinedFunction(<function1>,StringType,Some(List(StringType)))

scala> df.createOrReplaceTempView("people")

scala> spark.sql("select addName(name),age from people").show
+-----------------+---+
|UDF:addName(name)|age|
+-----------------+---+
|        Name:Andy| 30|
|      Name:Justin| 19|
+-----------------+---+
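
Registering a UDF also returns a UserDefinedFunction (res50 above), so the same logic can be used from the DataFrame API without going through SQL. A minimal sketch, assuming the same spark-shell session (spark.implicits._ is already in scope there; the val name is illustrative):

scala> import org.apache.spark.sql.functions.udf

scala> val addNameUdf = udf((x: String) => "Name:" + x)

scala> df.select(addNameUdf($"name"), $"age").show()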

UDAF Functions

A custom aggregate function that computes the average salary, using the following sample data.
employees.json:

{"name":"Michael", "salary":3000}
{"name":"Andy", "salary":4500}
{"name":"Justin", "salary":3500}
{"name":"Berta", "salary":4000}

Weakly Typed User-Defined Aggregate Function

import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SparkSession}

object MyAverage extends UserDefinedAggregateFunction {
  // Input type of the aggregate function
  override def inputSchema: StructType = StructType(StructField("inputColumn", LongType) :: Nil)

  // Buffer (intermediate result) type: a running sum and a running count
  override def bufferSchema: StructType = StructType(StructField("sum", LongType) :: StructField("count", LongType) :: Nil)

  // Return type
  override def dataType: DataType = DoubleType

  // Whether the function always returns the same output for the same input
  override def deterministic: Boolean = true

  // Initialize the buffer
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0) = 0L
    buffer(1) = 0L
  }

  // Fold one input row into the buffer (within a single partition)
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if (!input.isNullAt(0)) {
      buffer(0) = buffer.getLong(0) + input.getLong(0)
      buffer(1) = buffer.getLong(1) + 1
    }
  }

  // Merge partial buffers coming from different executors
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getLong(0) + buffer2.getLong(0)
    buffer1(1) = buffer1.getLong(1) + buffer2.getLong(1)
  }

  // Compute the final result
  override def evaluate(buffer: Row): Any = buffer.getLong(0).toDouble / buffer.getLong(1)
  
}


object test {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("sparksession")
      .master("local[*]")
      .getOrCreate()
    val df = spark.read.json("F:\\BigData\\employees.json")
    df.createOrReplaceTempView("employees")
    spark.udf.register("MyAverage", MyAverage)
    df.show()

    spark.sql("select MyAverage(salary) from employees").show()
    spark.stop()
  }
}

Running test prints the employees table and then the query result: an average salary of 3750.0.

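A UserDefinedAggregateFunction can also be applied directly as a Column expression, so registering it for SQL is optional. A minimal sketch against the same DataFrame (the alias is illustrative):

import org.apache.spark.sql.functions.col

df.select(MyAverage(col("salary")).as("average_salary")).show()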

Strongly Typed User-Defined Aggregate Function

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders, SparkSession}

case class Employee(name: String, salary: Long)

case class Average(var sum: Long, var count: Long)

object MyAverage2 extends Aggregator[Employee, Average, Double] {
  // The zero value of the buffer: total salary and record count, both starting at 0
  override def zero: Average = Average(0L, 0L)

  // Fold one Employee into the buffer (within a single partition)
  override def reduce(b: Average, a: Employee): Average = {
    b.sum += a.salary
    b.count += 1
    b
  }

  // Merge partial buffers coming from different executors
  override def merge(b1: Average, b2: Average): Average = {
    b1.sum += b2.sum
    b1.count += b2.count
    b1
  }

  // Compute the final output
  override def finish(reduction: Average): Double = reduction.sum.toDouble / reduction.count

  // Encoder for the intermediate (buffer) value type
  // Encoders.product is the encoder for Scala tuples and case classes
  override def bufferEncoder: Encoder[Average] = Encoders.product

  // Encoder for the final output type
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}

object test2 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("sparksession")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._
    val ds = spark.read.json("F:\\BigData\\employees.json").as[Employee]
    ds.createOrReplaceTempView("employees")
    ds.show()

    ds.select(MyAverage2.toColumn.name("average_salary")).show()
    spark.stop()
  }
}

Running test2 prints the employees dataset and then a single average_salary value of 3750.0.
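In Spark 2.x a typed Aggregator is only usable through the typed Dataset API, as above. From Spark 3.0 onward (an assumption about the runtime version, not shown in this post), it can also be wrapped with functions.udaf and queried through SQL against the temp view created in test2:

import org.apache.spark.sql.functions

spark.udf.register("MyAverage2", functions.udaf(MyAverage2))
spark.sql("select MyAverage2(salary) as average_salary from employees").show()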
