一、Real-time top-3 hot products statistics case
1、Overview
One of the most powerful aspects of Spark Streaming is that it can be integrated with Spark Core and Spark SQL. Through operators such as transform and foreachRDD, we have already seen how to run Spark Core batch operations on the RDDs inside a DStream. Now let's look at how to combine the RDDs inside a DStream with Spark SQL.
Case: every 10 seconds, count the clicks on each product of each category over the last 60 seconds, then compute the top-3 hot products of each category.
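For reference, and matching the comment in the Java example below, each input line is expected to look like

    leo iphone mobile_phone

i.e. user, product and category separated by single spaces (the code takes field 2 as the category and field 1 as the product). Every 10 seconds the job then prints, for the last 60 seconds of such lines, at most the three products with the highest click counts in each category. For local testing, such lines can be fed to the socket the examples connect to with a tool such as netcat, e.g. nc -lk 9999 on the host spark1.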
2、Java example
package cn.spark.study.streaming;

import java.util.ArrayList;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.hive.HiveContext;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

import scala.Tuple2;

/**
 * Integration with Spark SQL: real-time top-3 hot products statistics
 * @author Administrator
 */
public class Top3HotProduct {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setMaster("local[2]")
                .setAppName("Top3HotProduct");
        JavaStreamingContext jssc = new JavaStreamingContext(conf, Durations.seconds(1));

        // First, the input log format: user product category, e.g.
        // leo iphone mobile_phone

        // A side note: why are our Spark Streaming examples all socket-based? Simply because it
        // is convenient. In real companies, the most common data source is Kafka. Practicing with
        // a socket does no harm, because different input sources only differ in the small piece of
        // code that creates the input DStream; the core is the Spark Streaming computation that
        // follows. Once the examples and features are mastered, switching to Kafka is easy, since
        // we have already covered and coded both ways of using Kafka as a data source.

        // Get the input data stream
        JavaReceiverInputDStream<String> productClickLogsDStream =
                jssc.socketTextStream("spark1", 9999);

        // Then map each product of each category to the format (category_product, 1),
        // so that a window operation can later run reduceByKey over data in this format
        // and count the clicks of each product of each category within one window.
        JavaPairDStream<String, Integer> categoryProductPairsDStream = productClickLogsDStream
                .mapToPair(new PairFunction<String, String, Integer>() {

                    private static final long serialVersionUID = 1L;

                    @Override
                    public Tuple2<String, Integer> call(String productClickLog) throws Exception {
                        String[] productClickLogSplited = productClickLog.split(" ");
                        return new Tuple2<String, Integer>(
                                productClickLogSplited[2] + "_" + productClickLogSplited[1], 1);
                    }

                });

        // Then run the window operation.
        // At this point, every 10 seconds, reduceByKey runs over the last 60 seconds of data
        // and computes the click count of each product of each category within those 60 seconds.
        JavaPairDStream<String, Integer> categoryProductCountsDStream =
                categoryProductPairsDStream.reduceByKeyAndWindow(
                        new Function2<Integer, Integer, Integer>() {

                            private static final long serialVersionUID = 1L;

                            @Override
                            public Integer call(Integer v1, Integer v2) throws Exception {
                                return v1 + v2;
                            }

                        }, Durations.seconds(60), Durations.seconds(10));

        // Then, for the per-category per-product click counts of the last 60 seconds,
        // use foreachRDD and, inside it, Spark SQL to compute the top-3 hot products.
        categoryProductCountsDStream.foreachRDD(new Function<JavaPairRDD<String, Integer>, Void>() {

            private static final long serialVersionUID = 1L;

            @Override
            public Void call(JavaPairRDD<String, Integer> categoryProductCountsRDD) throws Exception {
                // Convert the RDD into the JavaRDD<Row> format
                JavaRDD<Row> categoryProductCountRowRDD = categoryProductCountsRDD.map(
                        new Function<Tuple2<String, Integer>, Row>() {

                            private static final long serialVersionUID = 1L;

                            @Override
                            public Row call(Tuple2<String, Integer> categoryProductCount)
                                    throws Exception {
                                String category = categoryProductCount._1.split("_")[0];
                                String product = categoryProductCount._1.split("_")[1];
                                Integer count = categoryProductCount._2;
                                return RowFactory.create(category, product, count);
                            }

                        });

                // Then convert it to a DataFrame
                List<StructField> structFields = new ArrayList<StructField>();
                structFields.add(DataTypes.createStructField("category", DataTypes.StringType, true));
                structFields.add(DataTypes.createStructField("product", DataTypes.StringType, true));
                structFields.add(DataTypes.createStructField("click_count", DataTypes.IntegerType, true));
                StructType structType = DataTypes.createStructType(structFields);

                HiveContext hiveContext = new HiveContext(categoryProductCountsRDD.context());

                DataFrame categoryProductCountDF = hiveContext.createDataFrame(
                        categoryProductCountRowRDD, structType);

                // Register the per-category per-product click counts of the last 60 seconds
                // as a temporary table
                categoryProductCountDF.registerTempTable("product_click_log");

                // Run SQL against the temporary table to get, for each category,
                // the top-3 products by click count
                DataFrame top3ProductDF = hiveContext.sql(
                        "SELECT category,product,click_count "
                        + "FROM ("
                            + "SELECT "
                                + "category,"
                                + "product,"
                                + "click_count,"
                                + "row_number() OVER (PARTITION BY category ORDER BY click_count DESC) rank "
                            + "FROM product_click_log"
                        + ") tmp "
                        + "WHERE rank<=3");

                // A note: in a real enterprise scenario you usually would not just print the result.
                // The data should be saved to a Redis cache or a MySQL database, and a J2EE system
                // would then handle display, queries and graphical reports.
                top3ProductDF.show();

                return null;
            }

        });

        jssc.start();
        jssc.awaitTermination();
        jssc.close();
    }

}
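As the comments in the example note, the socket source is only used here for convenience; in companies the data source is usually Kafka, and only the few lines that create the input DStream would change. Below is a rough sketch, not part of the original case, of what that swap could look like with the direct approach of the Spark 1.x Kafka API; the broker address spark1:9092 and the topic name product_click_log are made-up placeholders, and the rest of the pipeline stays exactly the same.

    // Assumed extra imports (spark-streaming-kafka 1.x):
    // import java.util.HashMap; import java.util.HashSet;
    // import java.util.Map; import java.util.Set;
    // import kafka.serializer.StringDecoder;
    // import org.apache.spark.streaming.api.java.JavaDStream;
    // import org.apache.spark.streaming.api.java.JavaPairInputDStream;
    // import org.apache.spark.streaming.kafka.KafkaUtils;

    // Kafka connection parameters; broker list and topic name are placeholders
    Map<String, String> kafkaParams = new HashMap<String, String>();
    kafkaParams.put("metadata.broker.list", "spark1:9092");

    Set<String> topics = new HashSet<String>();
    topics.add("product_click_log");

    // Create the input DStream with the direct approach (one of the two Kafka approaches
    // mentioned above); each record is a (key, message) pair, the message being one log line
    JavaPairInputDStream<String, String> kafkaDStream = KafkaUtils.createDirectStream(
            jssc,
            String.class, String.class,
            StringDecoder.class, StringDecoder.class,
            kafkaParams, topics);

    // Keep only the message, so the rest of the pipeline (mapToPair, reduceByKeyAndWindow,
    // foreachRDD with Spark SQL) stays the same as in the socket version
    JavaDStream<String> productClickLogsDStream = kafkaDStream.map(
            new Function<Tuple2<String, String>, String>() {
                private static final long serialVersionUID = 1L;
                @Override
                public String call(Tuple2<String, String> tuple) throws Exception {
                    return tuple._2;
                }
            });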
3、Scala example
package cn.spark.study.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.types.IntegerType
import org.apache.spark.sql.hive.HiveContext

/**
 * @author Administrator
 */
object Top3HotProduct {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("Top3HotProduct")
    val ssc = new StreamingContext(conf, Seconds(1))

    val productClickLogsDStream = ssc.socketTextStream("spark1", 9999)

    val categoryProductPairsDStream = productClickLogsDStream
      .map { productClickLog =>
        (productClickLog.split(" ")(2) + "_" + productClickLog.split(" ")(1), 1)
      }

    val categoryProductCountsDStream = categoryProductPairsDStream.reduceByKeyAndWindow(
      (v1: Int, v2: Int) => v1 + v2,
      Seconds(60),
      Seconds(10))

    categoryProductCountsDStream.foreachRDD(categoryProductCountsRDD => {
      val categoryProductCountRowRDD = categoryProductCountsRDD.map(tuple => {
        val category = tuple._1.split("_")(0)
        val product = tuple._1.split("_")(1)
        val count = tuple._2
        Row(category, product, count)
      })

      val structType = StructType(Array(
        StructField("category", StringType, true),
        StructField("product", StringType, true),
        StructField("click_count", IntegerType, true)))

      val hiveContext = new HiveContext(categoryProductCountsRDD.context)

      val categoryProductCountDF = hiveContext.createDataFrame(categoryProductCountRowRDD, structType)

      categoryProductCountDF.registerTempTable("product_click_log")

      val top3ProductDF = hiveContext.sql(
        "SELECT category,product,click_count " +
        "FROM (" +
          "SELECT " +
            "category," +
            "product," +
            "click_count," +
            "row_number() OVER (PARTITION BY category ORDER BY click_count DESC) rank " +
          "FROM product_click_log" +
        ") tmp " +
        "WHERE rank<=3")

      top3ProductDF.show()
    })

    ssc.start()
    ssc.awaitTermination()
  }

}
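Both examples simply call top3ProductDF.show(). As the comments in the Java version point out, in a real enterprise scenario the per-window top-3 result would instead be saved to a Redis cache or a MySQL database and displayed by a J2EE system. As a rough, hedged sketch only (the JDBC URL, credentials and the top3_product table are made-up placeholders; the Scala version would be analogous), the show() call inside foreachRDD could be replaced with something like this:

    // Assumed extra imports: java.sql.Connection, java.sql.DriverManager,
    // java.sql.PreparedStatement, java.util.Iterator,
    // org.apache.spark.api.java.function.VoidFunction

    // Instead of top3ProductDF.show(), write each partition of the result via JDBC
    top3ProductDF.javaRDD().foreachPartition(new VoidFunction<Iterator<Row>>() {

        private static final long serialVersionUID = 1L;

        @Override
        public void call(Iterator<Row> rows) throws Exception {
            // Placeholder connection details; in practice a connection pool would be used
            Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://spark1:3306/spark_project", "root", "root");
            PreparedStatement pstmt = conn.prepareStatement(
                    "INSERT INTO top3_product(category, product, click_count) VALUES(?, ?, ?)");
            while (rows.hasNext()) {
                Row row = rows.next();
                pstmt.setString(1, row.getString(0));
                pstmt.setString(2, row.getString(1));
                pstmt.setInt(3, row.getInt(2));
                pstmt.executeUpdate();
            }
            pstmt.close();
            conn.close();
        }

    });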