Common parameters for running Hive on the Spark execution engine


set hive.execution.engine=spark;           -- use Spark instead of MapReduce as the execution engine
set hive.exec.parallel=true;               -- allow independent stages of a query to run in parallel
set hive.exec.parallel.thread.number=8;    -- how many stages may run in parallel
set hive.exec.compress.intermediate=true;  -- compress intermediate data written between stages
set hive.intermediate.compression.codec=org.apache.hadoop.io.compress.SnappyCodec;
set hive.intermediate.compression.type=BLOCK;
set hive.exec.compress.output=true;        -- compress the final query output
set mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;   -- legacy name for mapreduce.output.fileoutputformat.compress.codec
set mapred.output.compression.type=BLOCK;  -- legacy name for mapreduce.output.fileoutputformat.compress.type
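
These settings are typically issued at the top of a session or script, before the query they should affect. A minimal sketch of such a session (the table names src_logs and dst_logs are hypothetical, used only for illustration):

set hive.execution.engine=spark;
set hive.exec.compress.intermediate=true;
set hive.exec.compress.output=true;

-- src_logs and dst_logs are assumed example tables, not from the original post
insert overwrite table dst_logs partition (dt='2018-01-01')
select * from src_logs where dt='2018-01-01';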

 

set mapreduce.job.queuename=uat2;  -- set the YARN queue the Hive job runs in
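When the Spark engine is in use, Hive forwards properties prefixed with spark. to the Spark session it starts, so the queue of the Spark application itself can also be steered through the Spark-side property. A hedged sketch (spark.yarn.queue is a standard Spark-on-YARN property; whether your Hive version forwards it is an assumption to verify):

set spark.yarn.queue=uat2;  -- assumption: forwarded to the Spark session started by Hive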

set hive.exec.reducers.max=2400;              -- upper bound on the number of reducers Hive will estimate
set mapreduce.job.reduces=2004;               -- fix the reduce task count explicitly (overrides Hive's estimate)
set hive.exec.reducers.bytes.per.reducer=24;  -- input bytes handled per reducer when Hive estimates the count (the unit is bytes, so a value this small pushes the estimate up to the cap)
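
These three settings interact: when mapreduce.job.reduces is left unset (-1), Hive estimates the reducer count as roughly min(hive.exec.reducers.max, ceil(total input bytes / hive.exec.reducers.bytes.per.reducer)); a positive mapreduce.job.reduces bypasses the estimate entirely. As a rough worked example with more typical figures (illustrative only, not taken from the values above): 10 GB of input at 256 MB per reducer gives ceil(10737418240 / 268435456) = 40 reducers, well under the cap of 2400.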

set mapred.child.java.opts=-Xmx3024m;  -- JVM heap for task processes (legacy name covering both map and reduce tasks)
set mapreduce.reduce.memory.mb=4096;   -- container memory requested for each reduce task
set mapreduce.map.memory.mb=4096;      -- container memory requested for each map task
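
The -Xmx heap given to the task JVM should stay noticeably below the container size requested by mapreduce.*.memory.mb so that non-heap overhead still fits in the container; and when the Spark engine is used, executor memory is governed by the Spark-side properties instead. A minimal sketch, assuming roughly 80% of a 4096 MB container goes to heap (the ratio and the Spark value are assumptions, not figures from the original post):

set mapreduce.map.java.opts=-Xmx3276m;     -- about 80% of mapreduce.map.memory.mb=4096
set mapreduce.reduce.java.opts=-Xmx3276m;  -- about 80% of mapreduce.reduce.memory.mb=4096
set spark.executor.memory=3g;              -- assumption: applies when hive.execution.engine=spark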

set hive.exec.parallel.thread.number=16;  -- repeats the earlier setting with a higher degree of parallelism

-- the same property can also be passed when launching the client, e.g.: hive -hiveconf hive.exec.parallel.thread.number=16

 

