Prerequisites: start ZooKeeper and Kafka, and create a topic named wc.
1. MysqlConnectPool.scala
package sparkstreaming

import java.sql.{Connection, DriverManager, ResultSet, Statement}
import java.text.SimpleDateFormat
import java.util.Date

/**
 * @author yangwj
 * @date 2020/8/5 10:25
 *
 * Note: despite the name, this class manages a single JDBC connection
 * rather than a real connection pool.
 */
class MysqlConnectPool {
  private var connection: Connection = _
  private val driver = "com.mysql.jdbc.Driver"
  private val url = "jdbc:mysql://localhost:3306/spark?useUnicode=true&characterEncoding=utf-8&useSSL=false&autoReconnect=true"
  private val username = "root"
  private val password = "yang156122"

  /** Create (or reuse) the MySQL connection. */
  def conn(): Connection = {
    if (connection == null) {
      println(this.driver)
      Class.forName(this.driver)
      connection = DriverManager.getConnection(this.url, this.username, this.password)
    }
    connection
  }

  /** Close the statement and the connection. */
  def close(conn: Connection, stat: Statement): Unit = {
    try {
      if (stat != null && !stat.isClosed) {
        stat.close()
      }
      if (conn != null && !conn.isClosed) {
        conn.close()
      }
    } catch {
      case ex: Exception => ex.printStackTrace()
    }
  }

  /** Current time, formatted as yyyy-MM-dd HH:mm:ss. */
  def currentTime(): String = {
    new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date())
  }

  /** Insert a row; returns the update count, or 0 on failure. */
  def insert(stat: Statement, field: String, times: Int): Int = {
    try {
      val result: Int = stat.executeUpdate(
        s"INSERT INTO `spark_test`(`create_time`, `field`, `times`) VALUES ('${currentTime()}', '${field}', ${times})")
      println(s"insert, return value => $result")
      result
    } catch {
      case ex: Exception =>
        ex.printStackTrace() // goes to stderr
        0
    }
  }

  /** Delete all rows matching the field. */
  def delete(stat: Statement, field: String): Unit = {
    try {
      val result: Int = stat.executeUpdate(s"DELETE FROM `spark_test` WHERE `field` = '${field}'")
      println(s"delete, return value => $result")
    } catch {
      case ex: Exception => ex.printStackTrace()
    }
  }

  /** Query by field; the caller must consume the ResultSet before reusing the Statement. */
  def selectByField(stat: Statement, field: String): ResultSet = {
    try {
      stat.executeQuery(s"SELECT * FROM `spark_test` WHERE `field` = '${field}'")
    } catch {
      case ex: Exception =>
        ex.printStackTrace()
        null // TODO: returning null here should be improved
    }
  }

  /** Update the count for a field. */
  def update(stat: Statement, field: String, times: Int): Unit = {
    try {
      val resUpdate = stat.executeUpdate(s"UPDATE `spark_test` SET `times` = '${times}' WHERE `field` = '${field}'")
      println(s"update, return value => $resUpdate")
    } catch {
      case ex: Exception => ex.printStackTrace()
    }
  }
}
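The helpers above splice values straight into SQL strings, which is fragile and open to SQL injection, and the later select/update/insert sequence costs three round trips per word. As a minimal alternative sketch (not part of the original code), the whole upsert can be done with one PreparedStatement and MySQL's ON DUPLICATE KEY UPDATE; this assumes `spark_test` has a UNIQUE index on `field`, which the post does not show:

package sparkstreaming

import java.sql.{Connection, PreparedStatement}

// Hypothetical sketch, not part of the original post: a single prepared upsert
// that could replace the selectByField/update/insert sequence above.
// Assumes `spark_test` has a UNIQUE index on `field` (DDL not shown in the post).
object UpsertSketch {
  def upsert(conn: Connection, field: String, times: Int): Int = {
    val sql =
      """INSERT INTO `spark_test`(`create_time`, `field`, `times`)
        |VALUES (NOW(), ?, ?)
        |ON DUPLICATE KEY UPDATE `times` = `times` + ?""".stripMargin
    val ps: PreparedStatement = conn.prepareStatement(sql)
    try {
      ps.setString(1, field) // parameters are escaped by the JDBC driver
      ps.setInt(2, times)
      ps.setInt(3, times)
      ps.executeUpdate()     // MySQL returns 1 for an insert, 2 for an update
    } finally {
      ps.close()
    }
  }
}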
2. KafkaDirectWordCountV3.scala
package sparkstreaming

import java.sql.{Connection, ResultSet, Statement}

import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.{Duration, StreamingContext}

object KafkaDirectWordCountV3 {
  def main(args: Array[String]): Unit = {
    // Consumer group name
    val group = "g001"
    // Create the SparkConf
    val conf = new SparkConf().setAppName("KafkaDirectWordCount").setMaster("local[2]")
    // Create the StreamingContext with a 5-second batch interval
    val ssc = new StreamingContext(conf, Duration(5000))
    // Topic to consume
    val topic = "wc"
    // Kafka broker list (the direct stream connects Spark tasks straight to Kafka
    // partitions via the lower-level API, which is more efficient)
    val brokerList = "localhost:9092"
    // ZooKeeper address, used later to store the consumed offsets
    // (Redis or MySQL could be used for this instead)
    val zkQuorum = "localhost:2181"
    // Topic set used when creating the stream; Spark Streaming can consume several topics at once
    val topics: Set[String] = Set(topic)
    // ZKGroupTopicDirs points at the ZooKeeper directory where offsets are stored
    val topicDirs = new ZKGroupTopicDirs(group, topic)
    // The ZooKeeper path, i.e. "/g001/offsets/wc/"
    val zkTopicPath = s"${topicDirs.consumerOffsetDir}"

    // Kafka parameters
    val kafkaParams = Map(
      //"deserializer.encoding" -> "GBK",
      "metadata.broker.list" -> brokerList,
      "group.id" -> group,
      // Read from the beginning when no offset has been saved
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString
    )

    // ZooKeeper client: reads the saved offsets and writes the updated ones
    val zkClient = new ZkClient(zkQuorum)
    // Check whether the path has child nodes (one child per partition whose offset we saved), e.g.
    //   /g001/offsets/wc/0/10001
    //   /g001/offsets/wc/1/30001
    //   /g001/offsets/wc/2/10001
    val children = zkClient.countChildren(zkTopicPath)

    var kafkaStream: InputDStream[(String, String)] = null
    // If ZooKeeper holds saved offsets, use them as the starting positions of the stream
    var fromOffsets: Map[TopicAndPartition, Long] = Map()

    // Note: the offset lookup runs on the Driver
    if (children > 0) {
      for (i <- 0 until children) {
        println(s"path: $zkTopicPath")
        // e.g. /g001/offsets/wc/0 -> 10001
        val partitionOffset = zkClient.readData[String](s"$zkTopicPath/$i")
        val tp = TopicAndPartition(topic, i)
        // Add each partition's saved offset to fromOffsets
        fromOffsets += (tp -> partitionOffset.toLong)
      }
      // Transform every Kafka message into a (key, message) tuple,
      // e.g. (key, "hello tom hello jerry")
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key(), mmd.message())
      // Create the direct DStream; fromOffsets makes consumption resume from the saved offsets.
      // Type parameters: key type, value type, key decoder, value decoder, handler result
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
        ssc, kafkaParams, fromOffsets, messageHandler)
    } else {
      // No saved offsets: start from the latest (largest) or earliest (smallest)
      // offset, per the kafkaParams configuration
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
        ssc, kafkaParams, topics)
    }

    // Offset ranges of the current batch
    var offsetRanges = Array[OffsetRange]()
    // If a DStream transformation is applied first, the resulting RDD is no longer a
    // KafkaRDD, so the offsets can no longer be read from it:
    // val ds = kafkaStream.map(_._2).flatMap(_.split(" ")).map((_, 1))
    // ds.foreachRDD(rdd => {
    //   // this RDD is not a KafkaRDD anymore, so the cast below would fail
    //   offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
    // })

    // With the direct approach, offsets are only available on the KafkaRDD itself, so call
    // foreachRDD directly on kafkaStream, grab the offsets first, and then process the RDD.
    // Any accumulation across batches has to happen in an external store (a key-value
    // NoSQL store such as Redis, or MySQL as below).
    kafkaStream.foreachRDD { kafkaRDD =>
      // Only a KafkaRDD can be cast to HasOffsetRanges to obtain the offsets
      offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
      // Extract the message values
      val lines: RDD[String] = kafkaRDD.map(_._2)
      // Process the RDD; foreachPartition triggers the action
      lines.foreachPartition(partition =>
        partition.foreach(x => {
          // Business logic: a word count per line
          println(x)
          val tuples: Array[(String, Int)] = x.split(" ").map((_, 1))
          val grouped: Map[String, Array[(String, Int)]] = tuples.groupBy(_._1)
          val wordAndCount: Map[String, Int] = grouped.mapValues(_.length)
          val sorted: List[(String, Int)] = wordAndCount.toList.sortBy(-_._2)
          // Write the counts to MySQL
          for (i <- 0 until sorted.size) {
            val pool = new MysqlConnectPool
            val conn: Connection = pool.conn()
            val stat: Statement = conn.createStatement()
            val field = sorted(i)._1
            val times = sorted(i)._2
            println("starting upsert...")
            val set: ResultSet = pool.selectByField(stat, field)
            if (set.next()) {
              val dbfield: String = set.getString("field")
              val dbtimes: Int = set.getInt("times")
              println(s"existing row: dbfield=$dbfield, dbtimes=$dbtimes")
              // Accumulate: add this batch's count to the stored count
              pool.update(stat, field, times + dbtimes)
            } else {
              println("inserting new row...")
              pool.insert(stat, field, times)
            }
            pool.close(conn, stat)
          }
        })
      )
      // Save each partition's ending offset back to ZooKeeper,
      // e.g. /g001/offsets/wc/0/20000
      for (o <- offsetRanges) {
        val zkPath = s"${topicDirs.consumerOffsetDir}/${o.partition}"
        println(s"saving offset ${o.untilOffset} at path $zkPath")
        ZkUtils.updatePersistentPath(zkClient, zkPath, o.untilOffset.toString)
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
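Two things are worth noting about the job above. First, the word count is computed per message line and a fresh MySQL connection is opened for every word, which is expensive. A sketch of a more conventional variant (reusing the same MysqlConnectPool helper and the same offset handling, so everything named here comes from the code above) aggregates the whole batch with reduceByKey and opens one connection per partition:

// Sketch only, replacing the foreachRDD body above: aggregate across the whole
// batch instead of per line, and open one connection/statement per partition.
kafkaStream.foreachRDD { kafkaRDD =>
  offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges
  val counts = kafkaRDD
    .map(_._2)          // keep only the message values
    .flatMap(_.split(" "))
    .map((_, 1))
    .reduceByKey(_ + _) // batch-wide word counts
  counts.foreachPartition { iter =>
    val pool = new MysqlConnectPool // one connection per partition, not per word
    val conn = pool.conn()
    val stat = conn.createStatement()
    iter.foreach { case (field, times) =>
      val set = pool.selectByField(stat, field)
      if (set.next()) pool.update(stat, field, times + set.getInt("times"))
      else pool.insert(stat, field, times)
    }
    pool.close(conn, stat)
  }
  // ... then save offsetRanges to ZooKeeper exactly as above ...
}

Second, either version gives at-least-once semantics: the offsets are written to ZooKeeper only after the MySQL writes, so a crash between the two replays the batch, and the non-idempotent `times = times + n` update can then double-count.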
3. pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.yangwj</groupId>
    <artifactId>spark</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <scala.version>2.11.8</scala.version>
        <spark.version>2.2.0</spark.version>
        <encoding>UTF-8</encoding>
        <java.version>1.8</java.version>
        <hadoop.version>2.7.7</hadoop.version>
        <hbase.version>2.0.5</hbase.version>
        <spring-data-hadoop.version>2.4.0</spring-data-hadoop.version>
    </properties>

    <dependencies>
        <!-- Kafka 0.8 connector for Spark Streaming -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!-- Spark Streaming -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!-- Spark MLlib -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-mllib_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.codehaus.janino</groupId>
            <artifactId>janino</artifactId>
            <version>3.0.8</version>
        </dependency>
        <!-- Scala library -->
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <!-- Spark Core -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!-- Spark SQL -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/redis.clients/jedis -->
        <dependency>
            <groupId>redis.clients</groupId>
            <artifactId>jedis</artifactId>
            <version>2.9.0</version>
        </dependency>
        <!-- MySQL JDBC driver -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.12</version>
        </dependency>
    </dependencies>
</project>