Create the configuration file
cd /usr/app/flume1.6/conf
vi flume-dirTohdfs.properties
#agent1 name
agent1.sources=source1
agent1.sinks=sink1
agent1.channels=channel1

#Spooling Directory
#set source1
agent1.sources.source1.type=spooldir
agent1.sources.source1.spoolDir=/usr/app/flumelog/dir/logdfs
agent1.sources.source1.channels=channel1
agent1.sources.source1.fileHeader=false
agent1.sources.source1.interceptors=i1
agent1.sources.source1.interceptors.i1.type=timestamp

#set sink1
agent1.sinks.sink1.type=hdfs
agent1.sinks.sink1.hdfs.path=/user/yuhui/flume
agent1.sinks.sink1.hdfs.fileType=DataStream
agent1.sinks.sink1.hdfs.writeFormat=TEXT
agent1.sinks.sink1.hdfs.rollInterval=1
agent1.sinks.sink1.channel=channel1
agent1.sinks.sink1.hdfs.filePrefix=%Y-%m-%d

#set channel1
agent1.channels.channel1.type=file
agent1.channels.channel1.checkpointDir=/usr/app/flumelog/dir/logdfstmp/point
agent1.channels.channel1.dataDirs=/usr/app/flumelog/dir/logdfstmp
Create the Linux directories
[root@hadoop11 app]# mkdir /usr/app/flumelog/dir
[root@hadoop11 app]# mkdir /usr/app/flumelog/dir/logdfs
[root@hadoop11 app]# mkdir /usr/app/flumelog/dir/logdfstmp
[root@hadoop11 app]# mkdir /usr/app/flumelog/dir/logdfstmp/point
Create the Hadoop directory
[root@hadoop11 app]# hadoop fs -mkdir /user/yuhui/flume
Start the agent with the configuration file
flume-ng agent -n agent1 -c conf -f ./flume-dirTohdfs.properties -Dflume.root.logger=DEBUG,console >./flume1.log 2>&1 &

Check that the agent process started:
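A minimal check, assuming jps is on the PATH: a Flume agent runs as a Java process whose main class shows up as Application, so either of the following should list it:

[root@hadoop11 app]# jps
[root@hadoop11 app]# ps -ef | grep flume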
Create the test data
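For example, write a couple of lines into a scratch file outside the monitored directory (the file name and contents here are illustrative):

[root@hadoop11 app]# echo "hello flume" > /usr/app/flumelog/test.log
[root@hadoop11 app]# echo "hello hdfs" >> /usr/app/flumelog/test.log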
Check that the HDFS test path contains no data yet:
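The path to list is the hdfs.path configured for sink1 above:

[root@hadoop11 app]# hadoop fs -ls /user/yuhui/flume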
Put the test data into the monitored directory; once Flume has finished reading a file, the file is automatically renamed:
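Continuing the illustrative example: copy the scratch file into the spooling directory, then list it. By default the spooldir source appends a .COMPLETED suffix to files it has fully consumed:

[root@hadoop11 app]# cp /usr/app/flumelog/test.log /usr/app/flumelog/dir/logdfs/
[root@hadoop11 app]# ls /usr/app/flumelog/dir/logdfs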
Check the HDFS test path for the data Flume wrote:
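With hdfs.filePrefix=%Y-%m-%d the output files are named after the current date, so the exact names will vary per run:

[root@hadoop11 app]# hadoop fs -ls /user/yuhui/flume
[root@hadoop11 app]# hadoop fs -cat /user/yuhui/flume/*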
Notes
The monitored log directory: any log file dropped here is picked up by Flume.
[root@hadoop11 app]# mkdir /usr/app/flumelog/dir/logdfs
The channel storage directory: data read from the logs stays here in the file channel (at most two log-number data files exist at a time; by default, once a file reaches 1.6 GB the earlier log is deleted and a new one is created); see the listing sketch after these notes.
[root@hadoop11 app]# mkdir /usr/app/flumelog/dir/logdfstmp
The checkpoint directory, which records the state of the files read from the monitored log path.
[root@hadoop11 app]# mkdir /usr/app/flumelog/dir/logdfstmp/point
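To see what the channel actually writes, list both directories after a run (a sketch; the data files in dataDirs are named log-<N>, and checkpointDir holds the checkpoint files):

[root@hadoop11 app]# ls /usr/app/flumelog/dir/logdfstmp
[root@hadoop11 app]# ls /usr/app/flumelog/dir/logdfstmp/point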

