1. Where small files can be merged:
    1. at the map input (combine small files into splits)
    2. at the map output
    3. at the reduce output
2. Merging small files at the map input
Parameter settings:
-- CombineHiveInputFormat splits by split size (multiple small files can go into one split)
-- and is the default implementation class
set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
Note: CombineHiveInputFormat is a subclass of HiveInputFormat that optimizes the split rules:
when a file does not fill a split (filesize < splitsize), multiple small files are packed into a single split, up to the split size.
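The size of the combined split is itself tunable. A minimal sketch of the Hadoop-side knobs that CombineHiveInputFormat consults; the byte values here are illustrative assumptions, not settings used in the tests below:

set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
set mapreduce.input.fileinputformat.split.maxsize=256000000;           -- upper bound on one combined split
set mapreduce.input.fileinputformat.split.minsize.per.node=128000000;  -- bytes to gather per node before spilling to rack level
set mapreduce.input.fileinputformat.split.minsize.per.rack=128000000;  -- bytes to gather per rack before combining across racks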
Test (merging small files at the map input)

-- Test 1: 3 files, 34.8 M each; input format HiveInputFormat; split size 128 M
-- Expected: splits = files = mapTasks = 3
34.8 M  104.4 M  /user/hive/warehouse/home.db/gulivideo_user_ori/user.txt
34.8 M  104.4 M  /user/hive/warehouse/home.db/gulivideo_user_ori/user1.txt
34.8 M  104.4 M  /user/hive/warehouse/home.db/gulivideo_user_ori/user2.txt

set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
select substr(uploader,0,1), count(1) from gulivideo_user_ori group by substr(uploader,0,1);

Hadoop job information for Stage-1: number of mappers: 3; number of reducers: 1
Time taken: 14.557 seconds, Fetched: 62 row(s)

-- Test 2: 3 files, 34.8 M each; input format CombineHiveInputFormat; split size 128 M
-- Expected: 3 x 34.8 M = 104.4 M < 128 M, so all three files fit into 1 split = 1 mapTask
34.8 M  104.4 M  /user/hive/warehouse/home.db/gulivideo_user_ori/user.txt
34.8 M  104.4 M  /user/hive/warehouse/home.db/gulivideo_user_ori/user1.txt
34.8 M  104.4 M  /user/hive/warehouse/home.db/gulivideo_user_ori/user2.txt

set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
select substr(uploader,0,1), count(1) from gulivideo_user_ori group by substr(uploader,0,1);

Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
Time taken: 12.913 seconds, Fetched: 62 row(s)
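The per-file listings above have the shape of an HDFS disk-usage report (size, size x replication, path); assuming the same warehouse path, they can be reproduced from the Hive CLI:

dfs -du -h /user/hive/warehouse/home.db/gulivideo_user_ori;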
3. Merging small files when the MapTask / ReduceTask job finishes
Parameter settings:
-- Merge small files at the end of a map-only job; default true
SET hive.merge.mapfiles=true;
-- Merge small files at the end of a map-reduce job; default false
SET hive.merge.mapredfiles=true;
-- Target size of the merged files; default 256 M
set hive.merge.size.per.task=256000000;
-- When the average size of the job's output files is below this value, start a
-- separate map-reduce job to merge them; default 16 M
SET hive.merge.smallfiles.avgsize=16000000;
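Read together, the last two settings form a trigger and a target: the extra merge job fires only when the average output file size drops below hive.merge.smallfiles.avgsize, and it then concatenates files toward hive.merge.size.per.task. With the numbers from the tests below:

-- Test 1 leaves three reducer outputs of 162, 157 and 163 bytes (avg ~161 bytes),
-- far below the 16 MB threshold, so with mapredfiles=true Hive appends a merge
-- stage (Test 2) that produces a single 482-byte file.
SET hive.merge.mapredfiles=true;
SET hive.merge.smallfiles.avgsize=16000000;  -- trigger: avg output size below this
set hive.merge.size.per.task=256000000;      -- target: merged files aim for this size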
Test (merging small files at the end of the MR job)

-- Test 1: 1 file, 34.8 M; merge at job end disabled; 3 reducers
set mapreduce.job.reduces=3;
SET hive.merge.mapredfiles=false;  -- do not merge small files when the job finishes
set yarn.scheduler.maximum-allocation-mb=118784;
set mapreduce.map.memory.mb=4096;
set mapreduce.reduce.memory.mb=4096;
set yarn.nodemanager.vmem-pmem-ratio=4.2;

create table mergeTab as
select substr(uploader,0,1), count(1) from gulivideo_user_ori group by substr(uploader,0,1);

Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 3
Time taken: 32.277 seconds
162  486  /user/hive/warehouse/home.db/mergetab/000000_0
157  471  /user/hive/warehouse/home.db/mergetab/000001_0
163  489  /user/hive/warehouse/home.db/mergetab/000002_0
-- Test 2: 1 file, 34.8 M; merge at job end enabled; 3 reducers; merge target size 256 M
set mapreduce.job.reduces=3;
SET hive.merge.mapredfiles=true;  -- merge small files when the job finishes
set hive.merge.size.per.task=256000000;
set yarn.scheduler.maximum-allocation-mb=118784;
set mapreduce.map.memory.mb=4096;
set mapreduce.reduce.memory.mb=4096;
set yarn.nodemanager.vmem-pmem-ratio=4.2;

create table mergeTab2 as
select substr(uploader,0,1), count(1) from gulivideo_user_ori group by substr(uploader,0,1);

Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 3
Time taken: 47.983 seconds
482  1.4 K  /user/hive/warehouse/home.db/mergetab2/000000_0
-- one merged file: 482 bytes = 162 + 157 + 163 from Test 1
-- Test 3: 1 file, 34.8 M; merge at job end enabled; 3 reducers; merge target size 100 bytes
-- (note: smaller than any single reducer output)
set mapreduce.job.reduces=3;
SET hive.merge.mapfiles=true;
SET hive.merge.mapredfiles=true;  -- merge small files when the job finishes
set hive.merge.size.per.task=100;
set yarn.scheduler.maximum-allocation-mb=118784;
set mapreduce.map.memory.mb=4096;
set mapreduce.reduce.memory.mb=4096;
set yarn.nodemanager.vmem-pmem-ratio=4.2;

create table mergeTab3 as  -- mergeTab2 already exists from Test 2, so a new name is needed
select substr(uploader,0,1), count(1) from gulivideo_user_ori group by substr(uploader,0,1);

Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 3
Time taken: 53.033 seconds
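For a table that already sits on many small files, a common remedy is to rewrite it through a merge-enabled job. A minimal sketch; mergeTab_compact is a hypothetical table name, and the settings are the defaults discussed above:

SET hive.merge.mapfiles=true;
SET hive.merge.mapredfiles=true;
set hive.merge.size.per.task=256000000;
SET hive.merge.smallfiles.avgsize=16000000;
-- rewriting the data lets the merge stage consolidate the new files
create table mergeTab_compact as select * from mergeTab;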