This article is a repost; if there are any copyright issues, please get in touch, thank you!
Reposted from: https://blog.csdn.net/piduzi/article/details/81636253
Applicable scenario: the data source to use is only known at runtime.
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.SparkSession

import scala.collection.JavaConverters._

object ReadHive {
  def main(args: Array[String]): Unit = {
    val sparkBuilder = SparkSession
      .builder
      .master("local")
      .appName("Spk Pi")

    val conf = new Configuration()
    // The file paths here could instead be looked up from a database at runtime
    val core = new Path("C:\\Users\\shadow\\Desktop\\core-site.xml")
    val hdfs = new Path("C:\\Users\\shadow\\Desktop\\hdfs-site.xml")
    val hive = new Path("C:\\Users\\shadow\\Desktop\\hive-site.xml")
    conf.addResource(core)
    conf.addResource(hdfs)
    conf.addResource(hive)

    // Copy every entry of the Hadoop Configuration into the SparkSession builder,
    // so enableHiveSupport() talks to the metastore described by these XML files
    for (c <- conf.iterator().asScala) {
      sparkBuilder.config(c.getKey, c.getValue)
    }

    val spark = sparkBuilder.enableHiveSupport().getOrCreate()
    spark.sql("select * from default.wt_test1").show()
  }
}
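The comment in the code notes that the XML file paths could be queried from a database rather than hard-coded. Below is a minimal sketch of that idea over JDBC; the cluster_conf table, its cluster_name and conf_path columns, and the connection details are all hypothetical placeholders, not part of the original article.

import java.sql.DriverManager
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path

object ConfFromDb {
  // Build a Hadoop Configuration from XML file paths stored in a (hypothetical)
  // cluster_conf table: one row per config file for the given cluster.
  def loadConf(jdbcUrl: String, user: String, password: String, clusterName: String): Configuration = {
    val conf = new Configuration()
    val conn = DriverManager.getConnection(jdbcUrl, user, password)
    try {
      val stmt = conn.prepareStatement(
        "select conf_path from cluster_conf where cluster_name = ?")
      stmt.setString(1, clusterName)
      val rs = stmt.executeQuery()
      while (rs.next()) {
        // Each row holds the path of one file such as core-site.xml, hdfs-site.xml or hive-site.xml
        conf.addResource(new Path(rs.getString("conf_path")))
      }
    } finally {
      conn.close()
    }
    conf
  }
}

The Configuration returned here can then be copied into the SparkSession builder exactly as in the main example above.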