UDF Functions
scala> val df=spark.read.json("people.json")
df: org.apache.spark.sql.DataFrame = [age: bigint, name: string]
scala> df.show
+---+------+
|age| name|
+---+------+
| 30| Andy|
| 19|Justin|
+---+------+
scala> spark.udf.register("addName",(x:String)=>"Name:"+x)
res50: org.apache.spark.sql.expressions.UserDefinedFunction = UserDefinedFunction(<function1>,StringType,Some(List(StringType)))
scala> df.createOrReplaceTempView("people")
scala> spark.sql("select addName(name),age from people").show
+-----------------+---+
|UDF:addName(name)|age|
+-----------------+---+
| Name:Andy| 30|
| Name:Justin| 19|
+-----------------+---+
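The UDF registered above can also be applied through the DataFrame API instead of SQL. A minimal sketch, assuming the same df from the transcript above (the name addNameUdf is only for illustration):
import org.apache.spark.sql.functions.{col, udf}

// Wrap the same lambda with functions.udf and apply it as a Column expression
val addNameUdf = udf((x: String) => "Name:" + x)
df.select(addNameUdf(col("name")).alias("addName(name)"), col("age")).show()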
UDAF Functions
A custom aggregate function that computes an average
employees.json
{"name":"Michael", "salary":3000}
{"name":"Andy", "salary":4500}
{"name":"Justin", "salary":3500}
{"name":"Berta", "salary":4000}
Weakly typed user-defined aggregate function
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SparkSession}
object MyAverage extends UserDefinedAggregateFunction {
  // Input type of the aggregate function
  override def inputSchema: StructType = StructType(StructField("inputColumn", LongType) :: Nil)
  // Type of the aggregation buffer (intermediate values)
  override def bufferSchema: StructType = StructType(StructField("sum", LongType) :: StructField("count", LongType) :: Nil)
  // Return type
  override def dataType: DataType = DoubleType
  // Whether the function always returns the same output for the same input
  override def deterministic: Boolean = true
  // Initialize the buffer
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0) = 0L
    buffer(1) = 0L
  }
  // Update the buffer with a new input row (within a single partition)
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if (!input.isNullAt(0)) {
      buffer(0) = buffer.getLong(0) + input.getLong(0)
      buffer(1) = buffer.getLong(1) + 1
    }
  }
  // Merge buffers coming from different executors/partitions
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getLong(0) + buffer2.getLong(0)
    buffer1(1) = buffer1.getLong(1) + buffer2.getLong(1)
  }
  // Compute the final result
  override def evaluate(buffer: Row): Any = buffer.getLong(0).toDouble / buffer.getLong(1)
}
object test {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("sparksession")
      .master("local[*]")
      .getOrCreate()
    val df = spark.read.json("F:\\BigData\\employees.json")
    df.createOrReplaceTempView("employees")
    spark.udf.register("MyAverage", MyAverage)
    df.show()
    spark.sql("select MyAverage(salary) from employees").show()
    spark.stop()
  }
}
The result is as follows: the query returns the average salary, 3750.0.
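Besides registering it for SQL, a UserDefinedAggregateFunction object can also be applied directly as a Column expression in the DataFrame API. A minimal sketch, assuming the same df as in the test above:
import org.apache.spark.sql.functions.col

// UserDefinedAggregateFunction.apply(Column*) turns the UDAF into a Column expression
df.select(MyAverage(col("salary")).alias("average_salary")).show()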
Strongly typed user-defined aggregate function
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders, SparkSession}
case class Employee(name: String, salary: Long)
case class Average(var sum: Long, var count: Long)
object MyAverage2 extends Aggregator[Employee, Average, Double] {
  // Define a data structure holding the salary total and the number of salaries, both initialized to 0
  override def zero: Average = Average(0L, 0L)
  // Fold one input record into the running aggregate (within a single partition)
  override def reduce(b: Average, a: Employee): Average = {
    b.sum += a.salary
    b.count += 1
    b
  }
  // Merge intermediate results from different executors/partitions
  override def merge(b1: Average, b2: Average): Average = {
    b1.sum += b2.sum
    b1.count += b2.count
    b1
  }
  // Compute the final output
  override def finish(reduction: Average): Double = reduction.sum.toDouble / reduction.count
  // Encoder for the intermediate value type; it must map to a case class.
  // Encoders.product is the encoder for Scala tuples and case classes.
  override def bufferEncoder: Encoder[Average] = Encoders.product
  // Encoder for the final output type
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
object test2 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("sparksession")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._
    val ds = spark.read.json("F:\\BigData\\employees.json").as[Employee]
    ds.createOrReplaceTempView("employees")
    ds.show()
    ds.select(MyAverage2.toColumn.name("average_salary")).show()
    spark.stop()
  }
}
The run result is as follows: average_salary = 3750.0.
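Note: in Spark 3.0 and later, UserDefinedAggregateFunction is deprecated, and a typed Aggregator can be registered for SQL use through functions.udaf. A sketch under that assumption (MyAverage3 is only an illustrative name; the Average case class is reused from above):
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders, functions}

// An Aggregator over the raw column type (Long) so that it can be called from SQL
object MyAverage3 extends Aggregator[Long, Average, Double] {
  override def zero: Average = Average(0L, 0L)
  override def reduce(b: Average, a: Long): Average = { b.sum += a; b.count += 1; b }
  override def merge(b1: Average, b2: Average): Average = { b1.sum += b2.sum; b1.count += b2.count; b1 }
  override def finish(reduction: Average): Double = reduction.sum.toDouble / reduction.count
  override def bufferEncoder: Encoder[Average] = Encoders.product
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}

// Register it like a UDF and call it from SQL, just as with the weakly typed version
spark.udf.register("MyAverage3", functions.udaf(MyAverage3))
spark.sql("select MyAverage3(salary) from employees").show()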