Spark 2.1: using df.select(when(a === b, 1).otherwise(0)) to replace (case when a == b then 1 else 0 end)


Recently at work I upgraded some sql.sh scripts that ran Hive statements to Spark 2.1, and one of the problems I hit was how to replace case when with the equivalent Scala DataFrame operation:
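As a minimal sketch of the pattern in the title (a hypothetical DataFrame df with columns a and b, not taken from the original scripts), the Hive expression maps over like this:

import org.apache.spark.sql.functions.when

// SELECT (CASE WHEN a = b THEN 1 ELSE 0 END) AS flag FROM t
val flagged = df.select(when(df("a") === df("b"), 1).otherwise(0).alias("flag"))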

Code and test data:

scala>     import org.apache.spark.sql.functions._
import org.apache.spark.sql.functions._

scala>     import spark.implicits._
import spark.implicits._

scala>     case class fpb_servercls(gridid: String, height: Int, objectid: Int, rsrp: Double, calibrategridid: Int, calibartetype: String)
defined class fpb_servercls

scala>     val fpb_server_test = List(
     |       fpb_servercls("grid1", 0, 888888, -88, 53, null),
     |       fpb_servercls("grid1", 5, 888888, -99, 53, null),
     |       fpb_servercls("grid2", 0, 333333, -78, 53, null),
     |       fpb_servercls("grid4", 0, 444444, -78, 53, null)
     |     ).toDF
fpb_server_test: org.apache.spark.sql.DataFrame = [gridid: string, height: int ... 4 more fields]

scala>     val sampe_data_test = List(
     |       fpb_servercls("grid1", 0, 888888, -78, 53, "HOMEWIFI"),
     |       fpb_servercls("grid1", 5, 999999, -89, 53, null),
     |       fpb_servercls("grid2", 0, 333333, -87, 53, null)
     |     ).toDF
sampe_data_test: org.apache.spark.sql.DataFrame = [gridid: string, height: int ... 4 more fields]
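For reference, the session above relies on the spark session that spark-shell builds for you. In a standalone job submitted with spark-submit (the scenario discussed at the end of this post), the session has to be created explicitly before .toDF is available. A minimal sketch, where the object and app names are my own assumptions:

import org.apache.spark.sql.SparkSession

object FpbCaseWhenDemo {
  // same case class as in the shell session above
  case class fpb_servercls(gridid: String, height: Int, objectid: Int,
                           rsrp: Double, calibrategridid: Int, calibartetype: String)

  def main(args: Array[String]): Unit = {
    // outside spark-shell there is no pre-built `spark`; create it explicitly
    val spark = SparkSession.builder().appName("fpb-case-when-demo").getOrCreate()
    import spark.implicits._

    val fpb_server_test = List(
      fpb_servercls("grid1", 0, 888888, -88, 53, null),
      fpb_servercls("grid1", 5, 888888, -99, 53, null),
      fpb_servercls("grid2", 0, 333333, -78, 53, null),
      fpb_servercls("grid4", 0, 444444, -78, 53, null)
    ).toDF

    fpb_server_test.show()
    spark.stop()
  }
}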

Incorrect code:

scala>         val temp_result = fpb_server_test.alias("fpb").join(sampe_data_test.alias("sample"),
     |           fpb_server_test("gridid") === sampe_data_test("gridid")
     |             && fpb_server_test("height") === sampe_data_test("height")
     |             && fpb_server_test("objectid") === sampe_data_test("objectid"), "left_outer")
     |           .select(
     |             fpb_server_test("gridid"),
     |             fpb_server_test("height"),
     |             fpb_server_test("objectid"),
     |             when(sampe_data_test("gridid") === lit(null), fpb_server_test("rsrp")).otherwise(sampe_data_test("rsrp")).alias("rsrp"),
     |             fpb_server_test("calibrategridid"),
     |             when(sampe_data_test("gridid") === lit(null), fpb_server_test("calibartetype")).otherwise(sampe_data_test("calibartetype")).alias("f_calibartetype")
     |           )
temp_result: org.apache.spark.sql.DataFrame = [gridid: string, height: int ... 4 more fields]

scala> temp_result.show
+------+------+--------+-----+---------------+---------------+
|gridid|height|objectid| rsrp|calibrategridid|f_calibartetype|
+------+------+--------+-----+---------------+---------------+
| grid1|     0|  888888|-78.0|             53|       HOMEWIFI|
| grid1|     5|  888888| null|             53|           null|
| grid2|     0|  333333|-87.0|             53|           null|
| grid4|     0|  444444| null|             53|           null|
+------+------+--------+-----+---------------+---------------+

The cause of the error is the null test: sampe_data_test("gridid") === lit(null) never evaluates to true, because under SQL's three-valued logic any comparison with null yields null, which when() treats as false, so the otherwise branch always fires. A column has to be tested for null with isNull.
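A quick way to see this, as a toy sketch with hypothetical data (assuming a spark-shell session with the imports from above):

val df = Seq(("a", Some(1)), ("b", None)).toDF("k", "v")

df.select(
  df("k"),
  // comparison with null yields null, which when() treats as false
  when(df("v") === lit(null), "matched").otherwise("not matched").alias("eq_null"),
  // the correct null test
  when(df("v").isNull, "matched").otherwise("not matched").alias("is_null")
).show()
// eq_null is "not matched" for every row; is_null is "matched" only for ("b", None)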

Correct usage:

scala>  val temp_result = fpb_server_test.alias("fpb").join(sampe_data_test.alias("sample"),
     |       fpb_server_test("gridid") === sampe_data_test("gridid")
     |         && fpb_server_test("height") === sampe_data_test("height")
     |         && fpb_server_test("objectid") === sampe_data_test("objectid"), "left_outer")
     |       .select(
     |         fpb_server_test("gridid"),
     |         fpb_server_test("height"),
     |         fpb_server_test("objectid"),
     |         when(sampe_data_test("gridid").isNull, fpb_server_test("rsrp")).otherwise(sampe_data_test("rsrp")).alias("rsrp"),
     |         fpb_server_test("calibrategridid"),
     |         when(sampe_data_test("gridid").isNull, fpb_server_test("calibartetype")).otherwise(sampe_data_test("calibartetype")).alias("f_calibartetype")
     |       )
temp_result: org.apache.spark.sql.DataFrame = [gridid: string, height: int ... 4 more fields]

scala> temp_result.show
+------+------+--------+-----+---------------+---------------+
|gridid|height|objectid| rsrp|calibrategridid|f_calibartetype|
+------+------+--------+-----+---------------+---------------+
| grid1|     0|  888888|-78.0|             53|       HOMEWIFI|
| grid1|     5|  888888|-99.0|             53|           null|
| grid2|     0|  333333|-87.0|             53|           null|
| grid4|     0|  444444|-78.0|             53|           null|
+------+------+--------+-----+---------------+---------------+
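As an alternative sketch (not from the original): for this "take the sample value on a match, otherwise keep the fpb value" pattern, coalesce is a common shorthand. Caveat: coalesce falls back to the fpb value whenever the sample value is null, even for matched rows, so it is only equivalent when matched sample rows never carry nulls in these columns.

import org.apache.spark.sql.functions.coalesce

val temp_result2 = fpb_server_test.alias("fpb")
  .join(sampe_data_test.alias("sample"),
    fpb_server_test("gridid") === sampe_data_test("gridid")
      && fpb_server_test("height") === sampe_data_test("height")
      && fpb_server_test("objectid") === sampe_data_test("objectid"),
    "left_outer")
  .select(
    fpb_server_test("gridid"),
    fpb_server_test("height"),
    fpb_server_test("objectid"),
    // sample.rsrp when non-null (matched), else fall back to fpb.rsrp
    coalesce(sampe_data_test("rsrp"), fpb_server_test("rsrp")).alias("rsrp"),
    fpb_server_test("calibrategridid"),
    coalesce(sampe_data_test("calibartetype"), fpb_server_test("calibartetype")).alias("f_calibartetype")
  )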

Puzzling code: the code below runs without problems in spark-shell, but fails with an error once the script is submitted via spark-submit:

scala>   val temp_result = fpb_server_test.alias("fpb").join(sampe_data_test.alias("sample"),
     |       fpb_server_test("gridid") === sampe_data_test("gridid")
     |         && fpb_server_test("height") === sampe_data_test("height")
     |         && fpb_server_test("objectid") === sampe_data_test("objectid"), "left_outer")
     |       .selectExpr("fpb.gridid", "fpb.height", "fpb.objectid",
     |         "(case when sample.gridid is null then fpb.rsrp else sample.rsrp end) as rsrp",
     |         "fpb.calibrategridid",
     |         "(case when sample.gridid is null then fpb.calibartetype else sample.calibartetype end) as calibartetype")
temp_result: org.apache.spark.sql.DataFrame = [gridid: string, height: int ... 4 more fields]

scala> temp_result.show
+------+------+--------+-----+---------------+-------------+
|gridid|height|objectid| rsrp|calibrategridid|calibartetype|
+------+------+--------+-----+---------------+-------------+
| grid1|     0|  888888|-78.0|             53|     HOMEWIFI|
| grid1|     5|  888888|-99.0|             53|         null|
| grid2|     0|  333333|-87.0|             53|         null|
| grid4|     0|  444444|-78.0|             53|         null|
+------+------+--------+-----+---------------+-------------+
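The post leaves the spark-submit failure unexplained, and I can only offer a hedged workaround sketch, not a confirmed fix: the same projection can avoid selectExpr's SQL-string parsing altogether by resolving the aliased columns with col("alias.column"), staying in the Column API that already worked in the correct version above.

import org.apache.spark.sql.functions.{col, when}

// workaround sketch (assumption): reference the join aliases via col(...)
// instead of SQL expression strings
val joined = fpb_server_test.alias("fpb").join(sampe_data_test.alias("sample"),
  col("fpb.gridid") === col("sample.gridid")
    && col("fpb.height") === col("sample.height")
    && col("fpb.objectid") === col("sample.objectid"), "left_outer")

val temp_result3 = joined.select(
  col("fpb.gridid"), col("fpb.height"), col("fpb.objectid"),
  when(col("sample.gridid").isNull, col("fpb.rsrp")).otherwise(col("sample.rsrp")).alias("rsrp"),
  col("fpb.calibrategridid"),
  when(col("sample.gridid").isNull, col("fpb.calibartetype")).otherwise(col("sample.calibartetype")).alias("calibartetype")
)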

 

