Spark SQL: Reading and Saving Parquet Files


import org.apache.spark.sql.SparkSession

/**
  * Created by soyo on 17-10-12.
  */
case class Person(name: String, age: Int)
case class Record(key: Int, value: String)

object rdd_to_dataframe_parquet {
  val warehouseLocation = "file:${system:user.dir}/spark-warehouse"
  val spark = SparkSession.builder()
    .config("spark.sql.warehouse.dir", warehouseLocation)
    .enableHiveSupport()
    .getOrCreate()
  import spark.implicits._

  def main(args: Array[String]): Unit = {
    // Read the text file, map each line to a Person, and convert the RDD to a DataFrame
    val df = spark.sparkContext.textFile("file:///home/soyo/桌面/spark編程測試數據/people.txt")
      .map(_.split(",")).map(x => Person(x(0), x(1).trim.toInt)).toDF()

    // Save the DataFrame as Parquet
    df.write.parquet("file:///home/soyo/桌面/spark編程測試數據/people.parquet")

    // Read the Parquet file back, register a temporary view, and query it with SQL
    val parquetFile = spark.read.parquet("file:///home/soyo/桌面/spark編程測試數據/people.parquet")
    parquetFile.createOrReplaceTempView("people")
    val result = spark.sql("select * from people")
    result.show()

    spark.stop()
  }
}
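
Note that df.write.parquet fails if the target path already exists (the default save mode is ErrorIfExists). A minimal sketch of specifying a save mode instead, reusing the example path from above:

    // Overwrite the existing Parquet directory instead of failing on a rerun
    df.write.mode("overwrite").parquet("file:///home/soyo/桌面/spark編程測試數據/people.parquet")

    // Parquet stores the schema with the data, so column names and types are recovered on read
    spark.read.parquet("file:///home/soyo/桌面/spark編程測試數據/people.parquet").printSchema()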

 Supplement: when results from multiple data sources need to be combined into one query:

 val data = result1.union(result2)

data.createOrReplaceTempView("data")   then run the subsequent queries against this view
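
A minimal sketch of this pattern, assuming a hypothetical second source people2.json with the same name/age fields (union matches columns by position, so the columns are aligned explicitly before combining):

    val result1 = spark.read.parquet("file:///home/soyo/桌面/spark編程測試數據/people.parquet")
    // people2.json is a hypothetical second data source with the same fields
    val result2 = spark.read.json("file:///home/soyo/桌面/spark編程測試數據/people2.json")
      .select($"name", $"age".cast("int"))   // align column order and types before union

    val data = result1.union(result2)
    data.createOrReplaceTempView("data")
    spark.sql("select name, age from data where age > 20").show()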

