Installation (just unpack the archive)
# Upload the Flume tarball to one node of the Hadoop cluster
scp apache-flume-1.6.0-bin.tar.gz mini1:/root/apps/
# Unpack
cd /root/apps/
mkdir -p install
tar -zxvf apache-flume-1.6.0-bin.tar.gz -C install
cd install
mv apache-flume-1.6.0-bin flume
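Optionally verify the unpacked distribution before writing any collection config (a minimal sketch; the JDK path /usr/local/jdk is an assumption, adjust it to your environment):
cd /root/apps/install/flume
# Point Flume at your JDK only if JAVA_HOME is not already set system-wide
cp conf/flume-env.sh.template conf/flume-env.sh
echo 'export JAVA_HOME=/usr/local/jdk' >> conf/flume-env.sh
# Print the Flume version to confirm the binaries run
bin/flume-ng version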
Flume supports many source and sink types; see the official user guide for the full reference:
http://flume.apache.org/FlumeUserGuide.html
Write a configuration file according to the collection requirements.
Receive data from a network port and sink to the logger:
vim flume/conf/netcat-logger.conf
# Name the three components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Source type: netcat listens on a TCP port; bind is the address the agent listens on, port is the listening port
a1.sources.r1.type = netcat
a1.sources.r1.bind = 192.168.38.3
a1.sources.r1.port = 44444
# Describe the sink
a1.sinks.k1.type = logger
# Use a channel which buffers events in memory
# Events move through the channel to the sink in transactions (batches). Channel parameters:
#   capacity: maximum number of events the channel can hold
#   transactionCapacity: maximum number of events taken from the source or given to the sink per transaction
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel in between
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
Start command:
# Tell flume-ng to start an agent: --conf points at Flume's own conf directory, --conf-file is our collection config, --name is the agent name
$ bin/flume-ng agent --conf conf --conf-file conf/netcat-logger.conf --name a1 -Dflume.root.logger=INFO,console
or
bin/flume-ng agent -c conf -f conf/netcat-logger.conf -n a1 -Dflume.root.logger=INFO,console
-c conf   the directory containing Flume's own configuration files
-f conf/netcat-logger.conf   the collection config we wrote
-n a1   the name of this agent
# Test: telnet to the bind address and port configured above (192.168.38.3:44444); the transcript below was captured with a localhost bind
$ telnet localhost 44444
Trying 127.0.0.1...
Connected to localhost.localdomain (127.0.0.1).
Escape character is '^]'.
Hello world! <ENTER>
OK
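If telnet is not available, nc works the same way (an alternative sketch; assumes the nc utility is installed and the agent is bound to 192.168.38.3 as configured above):
nc 192.168.38.3 44444
# Type a line and press Enter; the netcat source answers OK and the event
# shows up in the agent's console via the logger sink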
Receive data from a directory (spooling directory source)
vim ./conf/spool-logger.conf
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe/configure the source
# Watch a directory: spoolDir is the directory to monitor; fileHeader controls whether each event gets a header with the absolute path of the source file
a1.sources.r1.type = spooldir
a1.sources.r1.spoolDir = /root/flume_data_dir
a1.sources.r1.fileHeader = true
# Describe the sink
a1.sinks.k1.type = logger
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# Start command:
bin/flume-ng agent -c ./conf -f ./conf/spool-logger.conf -n a1 -Dflume.root.logger=INFO,console
# Test:
Drop files into /root/flume_data_dir; each file is turned into events and logged by the agent.
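For example (a minimal sketch; the file name is arbitrary, and by default the spooling directory source renames a fully ingested file to <name>.COMPLETED, so files must not be modified or reused after being dropped in):
mkdir -p /root/flume_data_dir
echo "hello spooldir" > /tmp/demo.txt
mv /tmp/demo.txt /root/flume_data_dir/
# After ingestion the file shows up as demo.txt.COMPLETED
ls /root/flume_data_dir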
Collect data from a command and sink to an Avro port (Avro can be used to send data from multiple agents to a single agent)
vim tail-avro.conf
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /home/hadoop/log/test.log
a1.sources.r1.channels = c1
# Describe the sink
# The Avro sink is a sender (an Avro client): it connects not to the local machine but to the Avro source on another machine, here mini2
a1.sinks.k1.type = avro
a1.sinks.k1.channel = c1
a1.sinks.k1.hostname = mini2
a1.sinks.k1.port = 4141
a1.sinks.k1.batch-size = 2
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# Start command
bin/flume-ng agent -c ./conf -f ./conf/tail-avro.conf -n a1
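Start the receiving Avro agent on mini2 first (next example), then start this agent. To produce test data, append lines to the tailed log (a simple sketch using the log path from the config above):
mkdir -p /home/hadoop/log
while true
do
  echo "$(date) test line" >> /home/hadoop/log/test.log
  sleep 1
done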
Receive data from an Avro port (can accept data from multiple Avro clients). The sink below is logger for demonstration; to land the data in HDFS instead, swap in an HDFS sink as in the tail-hdfs example that follows.
vim ./conf/avro-hdfs.conf
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = avro
a1.sources.r1.channels = c1
# Bind address: 0.0.0.0 binds all local IP addresses; here we bind the hostname mini2
#a1.sources.r1.bind = 0.0.0.0
a1.sources.r1.bind = mini2
a1.sources.r1.port = 4141
# Describe the sink
a1.sinks.k1.type = logger
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# Start command:
bin/flume-ng agent -c ./conf -f ./conf/avro-hdfs.conf -n a1 -Dflume.root.logger=INFO,console
Test by sending data (point -H at the address the Avro source is bound to, mini2 here; the file path is an example):
$ bin/flume-ng avro-client -H mini2 -p 4141 -F /usr/logs/log.10
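If /usr/logs/log.10 does not exist on the test machine, any local text file can be sent instead (a hypothetical test file; avro-client sends each line of the file as one event):
echo "hello avro" > /tmp/avro-test.txt
bin/flume-ng avro-client -H mini2 -p 4141 -F /tmp/avro-test.txt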
Collect data from a command and sink to HDFS
vim tail-hdfs.conf
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# exec means the source runs a command and collects its output as events
# Describe/configure the source
a1.sources.r1.type = exec
# tail -F follows the file by name (reopens it after rotation); tail -f follows the original file descriptor/inode
a1.sources.r1.command = tail -F /root/test_data/test.log
a1.sources.r1.channels = c1
# Describe the sink
# Sink target
a1.sinks.k1.type = hdfs
a1.sinks.k1.channel = c1
# Target directory; Flume substitutes the time escape sequences (%y-%m-%d/%H%M) in the path
a1.sinks.k1.hdfs.path = /flume/events/%y-%m-%d/%H%M/
# File name prefix
a1.sinks.k1.hdfs.filePrefix = events-
# Round the timestamp down so a new directory is used every 10 minutes
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = minute
# Roll to a new file after this many seconds
a1.sinks.k1.hdfs.rollInterval = 3
# Roll to a new file once it reaches this size (bytes)
a1.sinks.k1.hdfs.rollSize = 500
# Roll to a new file after this many events have been written
a1.sinks.k1.hdfs.rollCount = 20
# Flush to HDFS every 5 events (write batch size)
a1.sinks.k1.hdfs.batchSize = 5
# Use the local timestamp to resolve the time escapes in the path
a1.sinks.k1.hdfs.useLocalTimeStamp = true
# Output file type: the default is SequenceFile; DataStream writes plain text
a1.sinks.k1.hdfs.fileType = DataStream
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# Create the log file
mkdir -p /root/test_data
touch /root/test_data/test.log
# Keep appending to test.log to simulate a continuously generated log
while true
do
  echo 111111 >> /root/test_data/test.log
  sleep 0.5
done
# Start the Flume agent to collect the data
bin/flume-ng agent -c conf -f conf/tail-hdfs.conf -n a1
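Once data is flowing, check the output in HDFS (a quick sanity check; the path matches hdfs.path in the config above, and the exact file names vary with the roll settings):
# List the time-bucketed directories and rolled event files
hdfs dfs -ls -R /flume/events/
# Inspect one of the rolled files (fileType = DataStream, so plain text)
hdfs dfs -cat /flume/events/*/*/events-*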