There are plenty of examples online of reading HBase data through the newAPIHadoopRDD API, but our environment is hardened with Kerberos, and there is not much material on using a Kerberos-secured HBase from Spark. A few things need attention when accessing HBase this way, so this post briefly records the final implementation and the pitfalls hit along the way. The blog post 有kerberos認證hbase在spark環境下的使用 (using Kerberos-authenticated HBase in a Spark environment) was a huge help!
Environment and version information
CDH 6.2.1 big data cluster (including YARN, Spark, HDFS, and other components)
Project pom file
First of all, there is no need to install Scala: when running in local mode, adding the Scala runtime dependency to the pom is enough. Since the application ultimately runs on the cluster, and the CDH Spark lib directory already contains scala-library, spark-core, spark-sql and the other related dependencies, these are all declared with provided scope in the pom, i.e. they are only needed at compile time.
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.css.bigdata</groupId>
<artifactId>data-compare</artifactId>
<version>1.0-SNAPSHOT</version>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<java.version>1.8</java.version>
<version.hbase>2.1.0-cdh6.2.1</version.hbase>
<version.hadoop>3.0.0-cdh6.2.1</version.hadoop>
<maven.compiler.source>1.8</maven.compiler.source>
<version.scala>2.11</version.scala>
<version.scala.library>2.11.12</version.scala.library>
<version.spark>2.4.0-cdh6.2.1</version.spark>
</properties>
<dependencies>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${version.scala.library}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_${version.scala}</artifactId>
<version>${version.spark}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_${version.scala}</artifactId>
<version>${version.spark}</version>
<scope>provided</scope>
</dependency>
<!--HBase -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>${version.hbase}</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>${version.hbase}</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-mapreduce</artifactId>
<version>${version.hbase}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.1</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
<!-- copy resource files out of the jar -->
<plugin>
<artifactId>maven-resources-plugin</artifactId>
<executions>
<execution>
<id>copy-resources</id>
<phase>package</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<configuration>
<resources>
<resource>
<directory>src/main/resources</directory>
</resource>
</resources>
<outputDirectory>${project.build.directory}/conf</outputDirectory>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<configuration>
<archive>
<manifestEntries>
<Class-Path>../conf/</Class-Path>
</manifestEntries>
<manifest>
<addClasspath>true</addClasspath>
<classpathPrefix>../lib/</classpathPrefix>
<mainClass>com.css.bigdata.dataCompare.HBaseCompare</mainClass>
</manifest>
</archive>
</configuration>
</plugin>
<plugin>
<!-- this plugin copies the dependency jars into target/lib after compilation and keeps them out of the application jar -->
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
<execution>
<id>copy-dependencies</id>
<phase>prepare-package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/lib</outputDirectory>
<overWriteReleases>false</overWriteReleases>
<overWriteSnapshots>false</overWriteSnapshots>
<overWriteIfNewer>true</overWriteIfNewer>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
HBaseUtil class
package com.css.bigdata.dataCompare.hbase;
import com.css.bigdata.dataCompare.Constant;
import com.css.bigdata.dataCompare.util.KerberosCheckUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
public class HBaseUtil {
public static Logger logger = LoggerFactory.getLogger(HBaseUtil.class);
public static Configuration getHbaseConfiguration(String cluster){
Configuration hbaseConf = HBaseConfiguration.create();
// adjust a few settings
String hbaseIp = cluster;
hbaseConf.set("hbase.zookeeper.quorum", hbaseIp + ":2181");
hbaseConf.set("hbase.master", hbaseIp+":60000");
// avoid timeouts
hbaseConf.set("hbase.rpc.timeout", "10000");//10s
hbaseConf.set("hbase.client.retries.number", "2");
hbaseConf.set("hbase.client.operation.timeout", "10000");
return hbaseConf;
}
public static void kerberosLogin(Configuration hbConf){
// kerberos settings
hbConf.set("hadoop.security.authentication", "Kerberos");
hbConf.set("hbase.security.authentication", "kerberos");
hbConf.set("hbase.master.kerberos.principal", "hbase/_HOST@CVBG.COM");
hbConf.set("hbase.regionserver.kerberos.principal", "hbase/_HOST@CVBG.COM");
System.setProperty("javax.security.auth.useSubjectCredOnly", "false");
System.setProperty("java.security.krb5.conf", KerberosCheckUtil.getKrb5Conf());
try{
UserGroupInformation.setConfiguration(hbConf);
if (UserGroupInformation.isLoginKeytabBased() && UserGroupInformation.getLoginUser().getUserName().equals(KerberosCheckUtil.principal)) {
logger.info("hbase:" + hbConf.get("hbase.master")+ ",user [{}] is login already!",KerberosCheckUtil.principal);
}else {
UserGroupInformation.loginUserFromKeytab(KerberosCheckUtil.principal, KerberosCheckUtil.getKeyTabFile());
logger.info("hbase:" + hbConf.get("hbase.master") + ",user [{}] login successed!",KerberosCheckUtil.principal);
}
}catch (IOException e){
e.printStackTrace();
logger.error("kerbose登錄報錯," + KerberosCheckUtil.getKeyTabFile());
System.exit(1);
}
}
public static User getAuthenticatedUser(){
User loginedUser = null;
try {
logger.info("=====put the logined userinfomation to user====");
loginedUser = User.create(UserGroupInformation.getLoginUser());
} catch (IOException e) {
logger.error("===fialed put the logined userinfomation to user===",e);
}
return loginedUser;
}
}
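The key point in HBaseUtil is that the logged-in UserGroupInformation is wrapped into an HBase User, which is later handed to ConnectionFactory.createConnection explicitly. As a quick way to sanity-check these utilities outside of Spark, something like the sketch below could be used; the class name and the idea of taking the cluster host and table name from the command line are assumptions for illustration, not part of the project.
package com.css.bigdata.dataCompare.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.User;
// Hypothetical smoke test: log in with the keytab, wrap the UGI into an HBase User,
// and hand that User to ConnectionFactory when opening the connection.
public class HBaseUtilSmokeTest {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseUtil.getHbaseConfiguration(args[0]); // cluster host
        HBaseUtil.kerberosLogin(conf);                // Kerberos login via the keytab
        User user = HBaseUtil.getAuthenticatedUser(); // wrap the logged-in UGI
        try (Connection conn = ConnectionFactory.createConnection(conf, user);
             Admin admin = conn.getAdmin()) {
            System.out.println("table exists: " + admin.tableExists(TableName.valueOf(args[1])));
        }
    }
}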
KerberosCheckUtil class
package com.css.bigdata.dataCompare.util;
public class KerberosCheckUtil {
// kerberos principal
public static String principal="dw_hbkal@CVBG.COM";
// keytab file
public static String keyTabFileName="dw_hbkal.tab";
// default krb5 configuration file
public static String krb5Conf= "krb5.conf";
public static String getKeyTabFile() {
String runPath = KerberosCheckUtil.class.getResource("/").getPath();
return runPath + keyTabFileName;
//return "file:///root/przhang/dw_hbkal.keytab";
}
public static String getKrb5Conf() {
String runPath = KerberosCheckUtil.class.getResource("/").getPath();
return runPath + krb5Conf;
//return "fie:///root/przhang/krb5.conf";
}
}
KerberosTableInputFormat class
This class is copied directly from org.apache.hadoop.hbase.mapreduce.TableInputFormat, with two modifications: 1. the setConf method performs the Kerberos login and obtains the authenticated user; 2. wherever an HBase connection is created, the authenticated user is passed into the connection, and that connection is then used to read from and write to HBase.
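For orientation before the full listing, the essence of the two changes can also be written as a small subclass of TableInputFormat. This is only a sketch of mine with a hypothetical class name, not the code used in the project; the project copies the whole class instead, which additionally lets getStartEndKeys() reuse the authenticated user when hbase.mapreduce.splittable is set.
package com.css.bigdata.dataCompare.hbase;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.mapreduce.JobContext;
// Hypothetical condensed variant: only the two modified spots, expressed as overrides.
public class KerberosAwareTableInputFormat extends TableInputFormat {
    private User user;
    @Override
    public void setConf(Configuration configuration) {
        // 1. authenticate before any HBase access and remember the authenticated user
        HBaseUtil.kerberosLogin(configuration);
        user = HBaseUtil.getAuthenticatedUser();
        super.setConf(configuration); // the parent builds the Scan as usual
    }
    @Override
    protected void initialize(JobContext context) throws IOException {
        // 2. create the HBase connection with the authenticated user
        TableName tableName = TableName.valueOf(getConf().get(INPUT_TABLE));
        initializeTable(ConnectionFactory.createConnection(new Configuration(getConf()), user), tableName);
    }
}
The full class used in the project is below.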
package com.css.bigdata.dataCompare.hbase;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.StringUtils;
/**
* Convert HBase tabular data into a format that is consumable by Map/Reduce.
*/
@InterfaceAudience.Public
public class KerberosTableInputFormat extends TableInputFormatBase
implements Configurable {
@SuppressWarnings("hiding")
private static final Logger LOG = LoggerFactory.getLogger(KerberosTableInputFormat.class);
/** Job parameter that specifies the input table. */
public static final String INPUT_TABLE = "hbase.mapreduce.inputtable";
/**
* If specified, use start keys of this table to split.
* This is useful when you are preparing data for bulkload.
*/
private static final String SPLIT_TABLE = "hbase.mapreduce.splittable";
/** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified.
* See {@link TableMapReduceUtil#convertScanToString(Scan)} for more details.
*/
public static final String SCAN = "hbase.mapreduce.scan";
/** Scan start row */
public static final String SCAN_ROW_START = "hbase.mapreduce.scan.row.start";
/** Scan stop row */
public static final String SCAN_ROW_STOP = "hbase.mapreduce.scan.row.stop";
/** Column Family to Scan */
public static final String SCAN_COLUMN_FAMILY = "hbase.mapreduce.scan.column.family";
/** Space delimited list of columns and column families to scan. */
public static final String SCAN_COLUMNS = "hbase.mapreduce.scan.columns";
/** The timestamp used to filter columns with a specific timestamp. */
public static final String SCAN_TIMESTAMP = "hbase.mapreduce.scan.timestamp";
/** The starting timestamp used to filter columns with a specific range of versions. */
public static final String SCAN_TIMERANGE_START = "hbase.mapreduce.scan.timerange.start";
/** The ending timestamp used to filter columns with a specific range of versions. */
public static final String SCAN_TIMERANGE_END = "hbase.mapreduce.scan.timerange.end";
/** The maximum number of version to return. */
public static final String SCAN_MAXVERSIONS = "hbase.mapreduce.scan.maxversions";
/** Set to false to disable server-side caching of blocks for this scan. */
public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks";
/** The number of rows for caching that will be passed to scanners. */
public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows";
/** Set the maximum number of values to return for each call to next(). */
public static final String SCAN_BATCHSIZE = "hbase.mapreduce.scan.batchsize";
/** Specify if we have to shuffle the map tasks. */
public static final String SHUFFLE_MAPS = "hbase.mapreduce.inputtable.shufflemaps";
/** The configuration. */
private Configuration conf = null;
/** The kerberos authenticated user*/
private User user;
/**
* Returns the current configuration.
*
* @return The current configuration.
* @see org.apache.hadoop.conf.Configurable#getConf()
*/
@Override
public Configuration getConf() {
return conf;
}
/**
* Sets the configuration. This is used to set the details for the table to
* be scanned.
*
* @param configuration The configuration to set.
* @see org.apache.hadoop.conf.Configurable#setConf(
* org.apache.hadoop.conf.Configuration)
*/
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION",
justification="Intentional")
public void setConf(Configuration configuration) {
this.conf = configuration;
//========= perform kerberos authentication before creating the hbase connection =========
HBaseUtil.kerberosLogin(conf);
user = HBaseUtil.getAuthenticatedUser();
Scan scan = null;
if (conf.get(SCAN) != null) {
try {
scan = TableMapReduceUtil.convertStringToScan(conf.get(SCAN));
} catch (IOException e) {
LOG.error("An error occurred.", e);
}
} else {
try {
scan = createScanFromConfiguration(conf);
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
}
}
setScan(scan);
}
/**
* Sets up a {@link Scan} instance, applying settings from the configuration property
* constants defined in {@code TableInputFormat}. This allows specifying things such as:
* <ul>
* <li>start and stop rows</li>
* <li>column qualifiers or families</li>
* <li>timestamps or timerange</li>
* <li>scanner caching and batch size</li>
* </ul>
*/
public static Scan createScanFromConfiguration(Configuration conf) throws IOException {
Scan scan = new Scan();
if (conf.get(SCAN_ROW_START) != null) {
scan.setStartRow(Bytes.toBytesBinary(conf.get(SCAN_ROW_START)));
}
if (conf.get(SCAN_ROW_STOP) != null) {
scan.setStopRow(Bytes.toBytesBinary(conf.get(SCAN_ROW_STOP)));
}
if (conf.get(SCAN_COLUMNS) != null) {
addColumns(scan, conf.get(SCAN_COLUMNS));
}
for (String columnFamily : conf.getTrimmedStrings(SCAN_COLUMN_FAMILY)) {
scan.addFamily(Bytes.toBytes(columnFamily));
}
if (conf.get(SCAN_TIMESTAMP) != null) {
scan.setTimestamp(Long.parseLong(conf.get(SCAN_TIMESTAMP)));
}
if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) {
scan.setTimeRange(
Long.parseLong(conf.get(SCAN_TIMERANGE_START)),
Long.parseLong(conf.get(SCAN_TIMERANGE_END)));
}
if (conf.get(SCAN_MAXVERSIONS) != null) {
scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
}
if (conf.get(SCAN_CACHEDROWS) != null) {
scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
}
if (conf.get(SCAN_BATCHSIZE) != null) {
scan.setBatch(Integer.parseInt(conf.get(SCAN_BATCHSIZE)));
}
// false by default, full table scans generate too much BC churn
scan.setCacheBlocks((conf.getBoolean(SCAN_CACHEBLOCKS, false)));
return scan;
}
@Override
protected void initialize(JobContext context) throws IOException {
// Do we have to worry about mis-matches between the Configuration from setConf and the one
// in this context?
TableName tableName = TableName.valueOf(conf.get(INPUT_TABLE));
try {
//====================add authenticated user ===================
initializeTable(ConnectionFactory.createConnection(new Configuration(conf),user), tableName);
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
}
}
/**
* Parses a combined family and qualifier and adds either both or just the
* family in case there is no qualifier. This assumes the older colon
* divided notation, e.g. "family:qualifier".
*
* @param scan The Scan to update.
* @param familyAndQualifier family and qualifier
* @throws IllegalArgumentException When familyAndQualifier is invalid.
*/
private static void addColumn(Scan scan, byte[] familyAndQualifier) {
byte [][] fq = CellUtil.parseColumn(familyAndQualifier);
if (fq.length == 1) {
scan.addFamily(fq[0]);
} else if (fq.length == 2) {
scan.addColumn(fq[0], fq[1]);
} else {
throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
}
}
/**
* Adds an array of columns specified using old format, family:qualifier.
* <p>
* Overrides previous calls to {@link Scan#addColumn(byte[], byte[])}for any families in the
* input.
*
* @param scan The Scan to update.
* @param columns array of columns, formatted as <code>family:qualifier</code>
* @see Scan#addColumn(byte[], byte[])
*/
public static void addColumns(Scan scan, byte [][] columns) {
for (byte[] column : columns) {
addColumn(scan, column);
}
}
/**
* Calculates the splits that will serve as input for the map tasks. The
* number of splits matches the number of regions in a table. Splits are shuffled if
* required.
* @param context The current job context.
* @return The list of input splits.
* @throws IOException When creating the list of splits fails.
* @see org.apache.hadoop.mapreduce.InputFormat#getSplits(
* org.apache.hadoop.mapreduce.JobContext)
*/
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException {
List<InputSplit> splits = super.getSplits(context);
if ((conf.get(SHUFFLE_MAPS) != null) && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))) {
Collections.shuffle(splits);
}
return splits;
}
/**
* Convenience method to parse a string representation of an array of column specifiers.
*
* @param scan The Scan to update.
* @param columns The columns to parse.
*/
private static void addColumns(Scan scan, String columns) {
String[] cols = columns.split(" ");
for (String col : cols) {
addColumn(scan, Bytes.toBytes(col));
}
}
@Override
protected Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
if (conf.get(SPLIT_TABLE) != null) {
TableName splitTableName = TableName.valueOf(conf.get(SPLIT_TABLE));
//====================add authenticated user ===================
try (Connection conn = ConnectionFactory.createConnection(getConf(),user)) {
try (RegionLocator rl = conn.getRegionLocator(splitTableName)) {
return rl.getStartEndKeys();
}
}
}
return super.getStartEndKeys();
}
/**
* Sets split table in map-reduce job.
*/
public static void configureSplitTable(Job job, TableName tableName) {
job.getConfiguration().set(SPLIT_TABLE, tableName.getNameAsString());
}
}
Main program example class
package com.css.bigdata.dataCompare;
import com.css.bigdata.dataCompare.hbase.HBaseUtil;
import com.css.bigdata.dataCompare.hbase.KerberosTableInputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
public class HBaseCompare {
private static Configuration getKerberosLoginConf(String cluster){
Configuration conf = HBaseUtil.getHbaseConfiguration(cluster);
//HBaseUtil.kerberosLogin(conf); // logging in here would only authenticate the driver, see the pitfalls section
return conf;
}
// read the HBase table data and convert it
private static JavaPairRDD<String, Map<String,String>> getTableDataRDD(Configuration hconf,String tableName, JavaSparkContext sc) throws IOException {
hconf.set(KerberosTableInputFormat.INPUT_TABLE,tableName);
// set the scan
String scanToString = TableMapReduceUtil.convertScanToString(new Scan());
hconf.set(KerberosTableInputFormat.SCAN, scanToString);
// turn the HBase data into an RDD
JavaPairRDD<ImmutableBytesWritable, Result> dataRDD = sc.newAPIHadoopRDD(hconf,KerberosTableInputFormat.class,ImmutableBytesWritable.class,Result.class);
// HBase Result objects are not serializable, so convert them to plain Java types
JavaPairRDD<String, Map<String,String>> dataRowsRDD = dataRDD.mapToPair(new PairFunction<Tuple2<ImmutableBytesWritable, Result>, String, Map<String,String>>() {
@Override
public Tuple2<String, Map<String,String>> call(Tuple2<ImmutableBytesWritable, Result> immutableBytesWritableResultTuple2) throws Exception {
Result result = immutableBytesWritableResultTuple2._2;
HashMap<String,String> resultMap = new HashMap<String, String>();
for(Cell cell : result.rawCells()) {
resultMap.put(new String(CellUtil.cloneQualifier(cell)).toLowerCase(), new String(CellUtil.cloneValue(cell)));
}
return new Tuple2<>(Bytes.toString(result.getRow()),resultMap);
}
});
return dataRowsRDD;
}
public static void main(String[] args) {
String ip = args[0];
String table = args[1];
//SparkSession session = SparkSession.builder().appName("hbase example").master("local").getOrCreate();
SparkSession session = SparkSession.builder().appName("hbase example").getOrCreate();
JavaSparkContext sc = JavaSparkContext.fromSparkContext(session.sparkContext());
Configuration srcConf = getKerberosLoginConf(ip);
try{
JavaPairRDD<String, Map<String,String>> srcRowsRDD = getTableDataRDD(srcConf,table,sc);
// use the data
//...
} catch (Exception e){
e.printStackTrace();
}
}
}
Packaging and submitting to the YARN cluster
When packaging, the dependencies go into the lib directory, the Kerberos configuration file krb5.conf and the login keytab dw_hbkal.tab go into conf, and the application itself is built as a jar and placed in bin; the job is then submitted in yarn-client mode:
spark-submit --keytab ../conf/kerberos/dw_hbkal.keytab --principal dw_hbkal@CVBG.COM --files ../conf/kerberos/dw_hbkal.keytab,../conf/kerberos/krb5.conf --master yarn --jars ../lib/hbase-client-2.1.0-cdh6.2.1.jar,../lib/hbase-server-2.1.0-cdh6.2.1.jar,../lib/hbase-mapreduce-2.1.0-cdh6.2.1.jar --class com.css.bigdata.dataCompare.HBaseCompare data-compare-1.0-SNAPSHOT.jar 172.xxx.xxx.xxx testtable
Pitfalls encountered
- At first the custom KerberosTableInputFormat class was not used; the Kerberos login was done in the getKerberosLoginConf method of the main class HBaseCompare. That ran fine locally from IDEA in local mode, but when the job was submitted to the YARN cluster it failed, with errors saying the executors could not access the HBase cluster. After a long search it suddenly dawned on me that the non-RDD code in the main class runs on the driver, so only the driver had been authenticated while the executors had not. The blog post 有kerberos認證hbase在spark環境下的使用 pointed the way: rewrite TableInputFormat as KerberosTableInputFormat and perform the Kerberos login inside that class.
- With that solved, and since the application jar is distributed to every executor node, I packaged dw_hbkal.keytab and krb5.conf into the jar and had KerberosCheckUtil return their paths from there; after submitting, however, the program kept complaining that the files could not be found. I then tried placing a copy of the two files on every node of the cluster and returning absolute paths from KerberosCheckUtil, but at runtime the files still could not be found.
- Looking at the spark-submit options again, I noticed the --files parameter, which distributes the listed files to the working directory of every executor. So I stopped packaging the Kerberos files into the jar and added --files to the spark-submit command instead, and the program finally ran correctly. In hindsight, the tasks run inside YARN containers whose actual paths are not known in advance, so neither absolute paths nor files baked into the jar could be located at Kerberos login time, whereas with spark-submit --files Spark itself takes care of shipping the files and making them readable where the tasks run. At least that is how I understand it; a sketch of how the distributed files can be resolved follows below.
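To make the last point concrete, here is a hedged sketch of what KerberosCheckUtil could look like once the files are shipped with --files. On YARN the distributed files are localized into each container's working directory, so a plain relative name normally resolves there; the classpath fallback mirrors what the project does for local runs. The file names follow the spark-submit command above, and the whole variant is an assumption of mine rather than the project's exact code.
package com.css.bigdata.dataCompare.util;
import java.io.File;
// Hypothetical variant of KerberosCheckUtil for use with spark-submit --files.
public class KerberosCheckUtil {
    public static String principal = "dw_hbkal@CVBG.COM";
    private static final String keyTabFileName = "dw_hbkal.keytab";
    private static final String krb5Conf = "krb5.conf";
    public static String getKeyTabFile() {
        return resolve(keyTabFileName);
    }
    public static String getKrb5Conf() {
        return resolve(krb5Conf);
    }
    private static String resolve(String name) {
        // files passed via spark-submit --files are localized into the YARN container's
        // working directory, so a relative path resolves there on the executors
        File local = new File(name);
        if (local.exists()) {
            return local.getAbsolutePath();
        }
        // fall back to the classpath root (e.g. local runs, where ../conf/ is on the
        // classpath through the jar manifest)
        return KerberosCheckUtil.class.getResource("/").getPath() + name;
    }
}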