```python
# -*- coding: utf-8 -*-
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
import numpy as np

appName = "jhl_spark_1"  # your application name
master = "local"         # run in single-machine (local) mode
conf = SparkConf().setAppName(appName).setMaster(master)  # configure the SparkContext
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)

url = 'jdbc:oracle:thin:@127.0.0.1:1521:ORCL'
tablename = 'V_JSJQZ'
properties = {"user": "Xho", "password": "sys"}
df = sqlContext.read.jdbc(url=url, table=tablename, properties=properties)
# Equivalent option-style form:
# df = sqlContext.read.format("jdbc").option("url", url).option("dbtable", tablename) \
#          .option("user", "Xho").option("password", "sys").load()

# Register the DataFrame as a temp table so it can be referenced in SQL
# (on Spark 2.0+ the method is called createOrReplaceTempView)
df.registerTempTable("v_jsjqz")
# SQL can now be run against the registered table
df2 = sqlContext.sql("select ZBLX, BS, JS, JG from v_jsjqz t order by ZBLX, BS")

list_data = df2.toPandas()                 # convert to a pandas DataFrame
list_data = list_data.dropna()             # cleaning: drop rows containing null values
list_data = np.array(list_data).tolist()   # flatten to a plain Python list of rows

RDDv1 = sc.parallelize(list_data)          # parallelize the data back into an RDD
RDDv2 = RDDv1.map(lambda x: (x[0] + '^' + x[1], [[float(x[2]), float(x[3])]]))
RDDv3 = RDDv2.reduceByKey(lambda a, b: a + b)  # concatenate the per-key value lists

sc.stop()
```
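One gotcha this script glosses over: Spark does not ship the Oracle JDBC driver, so `read.jdbc` will fail with a "No suitable driver" error unless the driver jar is on the driver's classpath. A minimal sketch of one way to supply it, assuming a hypothetical local path to `ojdbc8.jar`:

```python
from pyspark import SparkConf, SparkContext

conf = (SparkConf()
        .setAppName("jhl_spark_1")
        .setMaster("local")
        # hypothetical path; point this at your Oracle JDBC driver jar
        .set("spark.driver.extraClassPath", "/path/to/ojdbc8.jar"))
sc = SparkContext(conf=conf)
```

When launching with spark-submit, passing `--jars /path/to/ojdbc8.jar` achieves the same thing.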
The `pyspark` package used here lives in the `python` folder of the Spark installation directory; copy it into `site-packages` under Anaconda's `Lib` so the import works.
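If you would rather not copy files into site-packages, the third-party findspark package adds the Spark installation to `sys.path` at runtime instead. A sketch, assuming a hypothetical install path:

```python
import findspark
findspark.init("/opt/spark")  # hypothetical SPARK_HOME; omit the argument if SPARK_HOME is already set

from pyspark import SparkContext  # pyspark is importable after init()
```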
This code does not configure any environment variables. If you would rather not set them system-wide on your machine, look up how to configure Spark's environment variables from within Python.
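For reference, a minimal sketch of doing that configuration inside the script itself, before any pyspark import; the install path and the py4j version are assumptions that vary by installation:

```python
import os
import sys

os.environ["SPARK_HOME"] = "/opt/spark"        # hypothetical install path
os.environ["PYSPARK_PYTHON"] = sys.executable  # run workers with the current interpreter

# make the bundled pyspark and py4j importable (the py4j version differs per Spark release)
sys.path.append(os.path.join(os.environ["SPARK_HOME"], "python"))
sys.path.append(os.path.join(os.environ["SPARK_HOME"], "python", "lib", "py4j-0.10.9-src.zip"))

from pyspark import SparkContext
```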