Integrating Spark with Spring Boot


1. pom.xml

<dependency>
    <groupId>org.springframework.data</groupId>
    <artifactId>spring-data-hadoop</artifactId>
    <version>2.5.0.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-client</artifactId>
    <version>1.3.2</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.mortbay.jetty</groupId>
            <artifactId>servlet-api-2.5</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.mortbay.jetty</groupId>
            <artifactId>servlet-api-2.5-6.1.14</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId>
    <version>2.7.3</version>
    <exclusions>
        <exclusion>
            <artifactId>servlet-api</artifactId>
            <groupId>javax.servlet</groupId>
        </exclusion>
    </exclusions>
</dependency>
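
Note that the dependencies above only cover the Hadoop and HBase client side; the WordCount example in section 2 also needs Spark itself on the compile classpath. A minimal sketch of the missing dependency (the Scala suffix and version are assumptions; match them to the Spark build on your cluster):

<dependency>
    <!-- assumed coordinates; align the _2.11 suffix and version with your cluster -->
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>2.1.0</version>
</dependency>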

2. WordCount example

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class JavaSparkWordCount {
    public static void main(String[] args) {
        // configure the application
        SparkConf conf = new SparkConf().setAppName("Java_WordCount");

        // create the SparkContext object: JavaSparkContext
        JavaSparkContext context = new JavaSparkContext(conf);

        // read the input data
        JavaRDD<String> lines = context.textFile(args[0]);

        // split each line into words
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" ")).iterator();
            }
        });

        // map each word to a count of one
        JavaPairRDD<String, Integer> wordOne = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<String, Integer>(word, 1);
            }
        });

        // sum the counts per word with reduceByKey
        JavaPairRDD<String, Integer> count = wordOne.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer i1, Integer i2) throws Exception {
                return i1 + i2;
            }
        });

        // trigger the computation with an action: collect the result to the driver
        List<Tuple2<String, Integer>> result = count.collect();

        // print the result
        for (Tuple2<String, Integer> tuple : result) {
            System.out.println(tuple._1 + "\t" + tuple._2);
        }

        // stop the SparkContext
        context.stop();
    }
}
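
For a quick smoke test on a single machine, without a cluster (a sketch I am adding; not part of the original post), you can set the master inline when building the SparkConf; local[*] runs Spark inside the driver JVM using all available cores:

        // hypothetical local-mode variant, useful for testing without a cluster
        SparkConf conf = new SparkConf()
                .setAppName("Java_WordCount")
                .setMaster("local[*]");

When submitting to the standalone cluster as in section 3, leave the master out of the code and pass it via --master on the command line instead.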
3. Packaging and running

If you package the project with Spring Boot's usual Maven packaging, submitting the jar fails with a ClassNotFoundException:

 bin/spark-submit --master spark://hadoop1:7077 --class com.JavaSparkWordCount /xxx/sparktest.jar /xxx/data.txt

WARN  SparkSubmit$$anon$2:87 - Failed to load com.JavaSparkWordCount.

java.lang.ClassNotFoundException: com.JavaSparkWordCount

at java.net.URLClassLoader.findClass(URLClassLoader.java:381)

Unpacking the jar reveals why: the class containing main is not at the root of the archive, but under the BOOT-INF\classes directory, where spark-submit does not look for it.
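
You can confirm this without unpacking the whole archive (a quick check I am adding here; adjust the jar name to match your build):

 jar tf sparktest.jar | grep JavaSparkWordCount.class

For a Spring Boot fat jar this prints BOOT-INF/classes/com/JavaSparkWordCount.class instead of com/JavaSparkWordCount.class.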

The correct way to package:

Press F4, or open IDEA's Project Structure dialog, and configure a plain JAR artifact for the project.

[screenshots of the IDEA artifact configuration]

Once configured, run Build > Build Artifacts... to build the jar.

In the resulting jar the class files sit under the com directory at the root of the archive.

Submitting the job again then works.
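
If you would rather keep the Maven build, one possible alternative (a sketch, not the approach used in the original post) is to give the Spring Boot fat jar a classifier, so that Maven also keeps the plain jar with classes at the root, which is the one to hand to spark-submit:

<plugin>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-maven-plugin</artifactId>
    <configuration>
        <!-- the repackaged fat jar gets the -exec suffix; the plain jar is kept -->
        <classifier>exec</classifier>
    </configuration>
</plugin>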

For more Spark topics, see the Spark-related chapters (the Spark posts do not seem to be written yet; they will be added later).

————————————————
Copyright notice: this is an original article by CSDN blogger 「qq_41665356」, released under the CC 4.0 BY-SA license; include the original link and this notice when reposting.
Original link: https://blog.csdn.net/qq_41665356/article/details/89283645

