-- Commonly used session parameters when running Hive with the Spark execution engine


-- Use Spark as Hive's execution engine and allow independent stages of a
-- query to run in parallel (up to 8 at a time).
set hive.execution.engine=spark;
set hive.exec.parallel=true;
set hive.exec.parallel.thread.number=8;

-- Compress intermediate (inter-stage/shuffle) data with Snappy:
-- fast codec, BLOCK-level compression.
set hive.exec.compress.intermediate=true;
set hive.intermediate.compression.codec=org.apache.hadoop.io.compress.SnappyCodec;
set hive.intermediate.compression.type=BLOCK;

-- Compress final query output with Gzip.
-- NOTE(review): replaced the deprecated Hadoop 1.x keys
-- (mapred.output.compression.codec / mapred.output.compression.type) with
-- their Hadoop 2.x equivalents, consistent with the mapreduce.* keys used
-- elsewhere in this file. Hadoop maps the old names automatically, so this
-- is behavior-preserving on any 2.x+ cluster.
set hive.exec.compress.output=true;
set mapreduce.output.fileoutputformat.compress.codec=org.apache.hadoop.io.compress.GzipCodec;
set mapreduce.output.fileoutputformat.compress.type=BLOCK;

 

-- Submit this session's jobs to the YARN queue "uat2".
-- (The original had the explanatory text "(设置hive的运行队列)" -- "set the
-- Hive run queue" -- appended AFTER the semicolon, which is not valid HiveQL;
-- it is moved into this comment.)
set mapreduce.job.queuename=uat2;

-- Cap the number of reducers Hive may launch, and pin the reducer count
-- for this job explicitly.
set hive.exec.reducers.max=2400;
set mapreduce.job.reduces=2004;
-- NOTE(review): this property is measured in BYTES of input per reducer;
-- 24 would request one reducer per ~24 bytes of input. The Hive default is
-- on the order of 256MB (256000000). This looks like a typo -- confirm the
-- intended value before using.
set hive.exec.reducers.bytes.per.reducer=24;

-- Per-task JVM heap and YARN container memory sizes.
-- NOTE(review): mapred.child.java.opts is a deprecated Hadoop 1.x key that
-- applies to BOTH map and reduce task JVMs; the modern per-phase keys are
-- mapreduce.map.java.opts / mapreduce.reduce.java.opts. Kept as-is since
-- splitting it would change which JVMs receive the option.
-- NOTE(review): -Xmx3024m is an unusual heap size -- confirm 3072m was not
-- intended. JVM heap should stay comfortably below the 4096 MB container
-- size set here.
-- (Whitespace around '=' normalized to match every other set statement in
-- this file.)
set mapred.child.java.opts=-Xmx3024m;
set mapreduce.reduce.memory.mb=4096;
set mapreduce.map.memory.mb=4096;

-- NOTE(review): this duplicates an earlier
-- "set hive.exec.parallel.thread.number=8;" in this file with a conflicting
-- value; when run as one script, this later value (16) wins. Keep only one
-- of the two settings.
set hive.exec.parallel.thread.number=16;

-- CLI equivalent of the setting above (this is a shell flag, not HiveQL, so
-- it must not appear bare in a SQL script; note also the DOUBLE dash --
-- "-hiveconf" with a single dash is not a valid Hive CLI option):
--   hive --hiveconf hive.exec.parallel.thread.number=16

 


免责声明!

本站转载的文章为个人学习借鉴使用,本站对版权不负任何法律责任。如果侵犯了您的隐私权益,请联系本站邮箱yoyou2525@163.com删除。



 
粤ICP备18138465号  © 2018-2025 CODEPRJ.COM