相關文章:
- ELK 架構之 Elasticsearch 和 Kibana 安裝配置
- ELK 架構之 Logstash 和 Filebeat 安裝配置
- ELK 架構之 Logstash 和 Filebeat 配置使用(采集過濾)
- Spring Boot 使用 Log4j2
之前安裝 ELK 的版本是 5.6.9,而且使用yum命令安裝的,這邊用最新版本 6.2.4 重新再安裝一遍,不再使用yum命令安裝,而是直接下載tar.gz包,解壓執行命令運行。
ELK Stack 包地址:https://www.elastic.co/downloads/past-releases
1. Elasticsearch
之前 Elasticsearch 運行在 root 賬戶下,因為 Elasticsearch 可以接收並執行腳本,所以為了安全,正式環境要用非 root 賬戶進行運行。
創建用戶組及用戶:
[root@node1 ~]# groupadd es &&
useradd es -g es
切換到 es 用戶:
[root@node1 ~]# su es
下載 Elasticsearch 包並解壓:
[es@node1 ~]# cd /home/es
[es@node1 es]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.4.tar.gz
tar -zvxf elasticsearch-6.2.4.tar.gz
編輯配置文件:
[es@node1 es]# vi /home/es/elasticsearch-6.2.4/config/elasticsearch.yml
network.host: node1
http.port: 9200
啟動 Elasticsearch:
[es@node1 es]# cd /home/es/elasticsearch-6.2.4/bin
[es@node1 es]# ./elasticsearch
出現下面錯誤:
[1]: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65536]
解決方案,切換到 root 用戶,並在/etc/security/limits.conf中添加配置:
[es@node1 es]# su root
[root@node1 ~]# vi /etc/security/limits.conf
* soft nofile 65536
* hard nofile 131072
* soft nproc 2048
* hard nproc 4096
然后切換到 es 用戶,后台啟動 Elasticsearch:
[root@node1 ~]# su es
[es@node1 ~]# cd /home/es/elasticsearch-6.2.4/bin
[es@node1 es]# ./elasticsearch -d
查看 Elasticsearch 運行是否正常:
[es@node1 es]# curl http://node1:9200/
{
"name" : "rK2jCU6",
"cluster_name" : "elasticsearch",
"cluster_uuid" : "m6_Ijnd3Qki3HN20S-Bajg",
"version" : {
"number" : "6.2.4",
"build_hash" : "ccec39f",
"build_date" : "2018-04-12T20:37:28.497551Z",
"build_snapshot" : false,
"lucene_version" : "7.2.1",
"minimum_wire_compatibility_version" : "5.6.0",
"minimum_index_compatibility_version" : "5.0.0"
},
"tagline" : "You Know, for Search"
}
2. Kibana
下載 Kibana 包並解壓:
[root@node1 ~]# mkdir /software && cd /software
[root@node1 software]# wget https://artifacts.elastic.co/downloads/kibana/kibana-6.2.4-linux-x86_64.tar.gz
[root@node1 software]# tar -zvxf kibana-6.2.4-linux-x86_64.tar.gz
編輯配置文件:
[root@node1 software]# vi /software/kibana-6.2.4-linux-x86_64/config/kibana.yml
# Kibana is served by a back end server. This setting specifies the port to use.
server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "192.168.0.11"
# The Kibana server's name. This is used for display purposes.
server.name: "kibana-server"
# The URL of the Elasticsearch instance to use for all your queries.
elasticsearch.url: "http://node1:9200"
后台運行 Kibana:
[root@node1 software]# cd /software/kibana-6.2.4-linux-x86_64/bin
[root@node1 software]# nohup ./kibana &
瀏覽器訪問:http://node1:5601/
3. Logstash
下載 Logstash 包並解壓:
[root@node1 software]# wget https://artifacts.elastic.co/downloads/logstash/logstash-6.2.4.tar.gz
[root@node1 software]# tar -zvxf logstash-6.2.4.tar.gz
創建logstash.conf配置文件:
[root@node1 software]# mkdir /software/logstash-6.2.4/conf.d
[root@node1 software]# vi /software/logstash-6.2.4/conf.d/logstash.conf
添加下面配置內容:
input {
beats {
port => 10515
}
}
filter {
if [fields][logtype] == "syslog" {
grok {
match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
add_field => [ "received_at", "%{@timestamp}" ]
add_field => [ "received_from", "%{host}" ]
}
syslog_pri { }
date {
match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
}
}
if [fields][logtype] == "spring-boot-log4j2" {
json {
source => "message"
target => "data"
}
}
}
output {
if [fields][logtype] == "spring-boot-log4j2"{
elasticsearch {
hosts => ["127.0.0.1:9200"]
index => "spring-boot-log4j2-%{+YYYY.MM.dd}"
}
}
if [fields][logtype] == "syslog"{
elasticsearch {
hosts => ["127.0.0.1:9200"]
index => "filebeat-%{+YYYY.MM.dd}"
}
}
}
后台啟動 Logstash:
[root@node1 software]# cd /software/logstash-6.2.4/bin
[root@node1 bin]# nohup ./logstash -f ../conf.d/logstash.conf &
查看端口是否被監聽:
[root@node1 bin]# netstat -lntp |grep 10515
tcp6 0 0 :::10515 :::* LISTEN 28934/java
4. Filebeat
下載 Filebeat 包並解壓:
[root@node1 software]# wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.2.4-linux-x86_64.tar.gz
[root@node1 software]# tar -zvxf filebeat-6.2.4-linux-x86_64.tar.gz
編輯配置文件:
[root@node1 software]# vi /software/filebeat-6.2.4-linux-x86_64/filebeat.yml
添加如下配置內容:
filebeat.prospectors:
- input_type: log
paths:
- /var/log/spring-boot-log4j2/*.log
document_type: "spring-boot-log4j2" # 定義寫入 ES 時的 _type 值
multiline:
#pattern: '^\s*(\d{4}|\d{2})\-(\d{2}|[a-zA-Z]{3})\-(\d{2}|\d{4})' # 指定匹配的表達式(匹配以 2017-11-15 08:04:23:889 時間格式開頭的字符串)
pattern: '^\s*("{)' # 指定匹配的表達式(匹配以 "{ 開頭的字符串)
negate: true # 是否匹配到
match: after # 合並到上一行的末尾
max_lines: 1000 # 最大的行數
timeout: 30s # 如果在規定的時間內沒有新的日志事件就不等待后面的日志
fields:
logsource: node1
logtype: spring-boot-log4j2
- input_type: log
paths:
- /var/log/messages
#- /var/log/*.log
document_type: "syslog" # 定義寫入 ES 時的 _type 值
fields:
logsource: node1
logtype: syslog
#output.elasticsearch:
#hosts: ["node1:9200"]
output.logstash:
hosts: ["node1:10515"]
后台啟動 Filebeat:
[root@node1 software]# cd /software/filebeat-6.2.4-linux-x86_64
[root@node1 filebeat-6.2.4-linux-x86_64]# nohup ./filebeat -e -c filebeat.yml &
關於 ELK 集成 Spring Boot Log4j2,可以查看下之前的文章。
參考資料:
