Checking the environment
# cat /etc/redhat-release
CentOS Linux release 7.5.1804 (Core)
[root@localhost elasticsearch]# uname -a
Linux localhost.localdomain 3.10.0-862.el7.x86_64 #1 SMP Fri Apr 20 16:44:24 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
[root@localhost elasticsearch]# docker info
Client:
 Debug Mode: false

Server:
 Containers: 1
  Running: 1
  Paused: 0
  Stopped: 0
 Images: 5
 Server Version: 19.03.8
 Storage Driver: overlay2
  Backing Filesystem: <unknown>
  Supports d_type: true
  Native Overlay Diff: true
 Logging Driver: json-file
 Cgroup Driver: cgroupfs
 Plugins:
  Volume: local
  Network: bridge host ipvlan macvlan null overlay
  Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
 Swarm: inactive
 Runtimes: runc
 Default Runtime: runc
 Init Binary: docker-init
 containerd version: 7ad184331fa3e55e52b890ea95e65ba581ae3429
 runc version: dc9208a3303feef5b3839f4323d9beb36df0a9dd
 init version: fec3683
 Security Options:
  seccomp
   Profile: default
 Kernel Version: 3.10.0-862.el7.x86_64
 Operating System: CentOS Linux 7 (Core)
 OSType: linux
 Architecture: x86_64
 CPUs: 1
 Total Memory: 5.67GiB
 Name: localhost.localdomain
 ID: PGGH:4IF4:TXUV:3CSM:LZZY:KVTA:FONM:WJIO:KVME:YYJJ:55IZ:WR7Q
 Docker Root Dir: /var/lib/docker
 Debug Mode: false
 Registry: https://index.docker.io/v1/
 Labels:
 Experimental: false
 Insecure Registries:
  192.168.1.11
  192.168.1.61
  127.0.0.0/8
 Registry Mirrors:
  https://7sl94zzz.mirror.aliyuncs.com/
 Live Restore Enabled: false
Building an ELK log collection system with Docker
Setting up a single-host log collection system
Download the images
Download the elasticsearch and kibana images
docker pull docker.elastic.co/elasticsearch/elasticsearch:6.6.2
docker pull docker.elastic.co/kibana/kibana:6.6.2
Set up the elasticsearch configuration file
# cat elasticsearch.yml
cluster.name: myes
#node.name: node-1
path.data: /usr/share/elasticsearch/data
#path.logs: /var/log/elasticsearch
bootstrap.memory_lock: false
network.host: 0.0.0.0
http.port: 9200
The host directory for the data volume must be created in advance; here it is created with the same path as the directory inside the container.
mkdir -p /usr/share/elasticsearch/data/
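The official image runs elasticsearch as uid 1000, so if the container exits with a permission error on the data path, giving that uid ownership of the host directory is a common fix (a hardening-conscious setup may prefer narrower permissions):
chown -R 1000:1000 /usr/share/elasticsearch/data/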
Start elasticsearch
docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -v /nas/nas/scripts/docker_es_kibana/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml -v /usr/share/elasticsearch/data:/usr/share/elasticsearch/data docker.elastic.co/elasticsearch/elasticsearch:6.6.2
Parameter breakdown
docker run -d                                         # run detached in the background
--name elasticsearch                                  # container name
-p 9200:9200 -p 9300:9300                             # ports mapped to the host
-e "discovery.type=single-node"                       # run as a single-node cluster
-v /nas/nas/scripts/docker_es_kibana/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml   # mount the config file
-v /usr/share/elasticsearch/data:/usr/share/elasticsearch/data   # mount the data directory
docker.elastic.co/elasticsearch/elasticsearch:6.6.2   # image to start from
Check that it is running
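A quick way to check is to list the container and tail its logs (the container name is the one used in the run command above):
# docker ps --filter name=elasticsearch
# docker logs --tail 20 elasticsearch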
Access test
# curl 192.168.1.227:9200
{
  "name" : "4GGPsYy",
  "cluster_name" : "myes",
  "cluster_uuid" : "dIkPP_6JTOiOIK_3pFCYFg",
  "version" : {
    "number" : "6.6.2",
    "build_flavor" : "default",
    "build_type" : "tar",
    "build_hash" : "3bd3e59",
    "build_date" : "2019-03-06T15:16:26.864148Z",
    "build_snapshot" : false,
    "lucene_version" : "7.6.0",
    "minimum_wire_compatibility_version" : "5.6.0",
    "minimum_index_compatibility_version" : "5.0.0"
  },
  "tagline" : "You Know, for Search"
}
The data directory now contains the corresponding data
# tree /usr/share/elasticsearch/data/nodes/
/usr/share/elasticsearch/data/nodes/
└── 0
    ├── indices
    │   ├── H8PfvdGxQWqrKlbtUwNqQQ
    │   │   ├── 0
    │   │   │   ├── index
    │   │   │   │   ├── _0.cfe
    │   │   │   │   ├── _0.cfs
    │   │   │   │   ├── _0.si
    │   │   │   │   ├── segments_4
    │   │   │   │   └── write.lock
    │   │   │   ├── _state
    │   │   │   │   └── state-1.st
    │   │   │   └── translog
    │   │   │       ├── translog-1.ckp
    │   │   │       ├── translog-1.tlog
    │   │   │       ├── translog-2.ckp
    │   │   │       ├── translog-2.tlog
    │   │   │       ├── translog-3.ckp
    │   │   │       ├── translog-3.tlog
    │   │   │       ├── translog-4.tlog
    │   │   │       └── translog.ckp
    │   │   ├── 1
    │   │   │   ├── index
    │   │   │   │   ├── _0.cfe
    │   │   │   │   ├── _0.cfs
    │   │   │   │   ├── _0.si
    │   │   │   │   ├── segments_4
    │   │   │   │   └── write.lock
    │   │   │   ├── _state
    │   │   │   │   └── state-1.st
    │   │   │   └── translog
    │   │   │       ├── translog-1.ckp
    │   │   │       ├── translog-1.tlog
    │   │   │       ├── translog-2.ckp
    │   │   │       ├── translog-2.tlog
    │   │   │       ├── translog-3.ckp
    │   │   │       ├── translog-3.tlog
    │   │   │       ├── translog-4.tlog
    │   │   │       └── translog.ckp
    │   │   ├── 2
    │   │   │   ├── index
    │   │   │   │   ├── _0.cfe
    │   │   │   │   ├── _0.cfs
    │   │   │   │   ├── _0.si
    │   │   │   │   ├── segments_4
    │   │   │   │   └── write.lock
    │   │   │   ├── _state
    │   │   │   │   └── state-1.st
    │   │   │   └── translog
    │   │   │       ├── translog-1.ckp
    │   │   │       ├── translog-1.tlog
    │   │   │       ├── translog-2.ckp
    │   │   │       ├── translog-2.tlog
    │   │   │       ├── translog-3.ckp
    │   │   │       ├── translog-3.tlog
    │   │   │       ├── translog-4.tlog
    │   │   │       └── translog.ckp
    │   │   ├── 3
    │   │   │   ├── index
    │   │   │   │   ├── _0.cfe
    │   │   │   │   ├── _0.cfs
    │   │   │   │   ├── _0.si
    │   │   │   │   ├── segments_4
    │   │   │   │   └── write.lock
    │   │   │   ├── _state
    │   │   │   │   └── state-1.st
    │   │   │   └── translog
    │   │   │       ├── translog-1.ckp
    │   │   │       ├── translog-1.tlog
    │   │   │       ├── translog-2.ckp
    │   │   │       ├── translog-2.tlog
    │   │   │       ├── translog-3.ckp
    │   │   │       ├── translog-3.tlog
    │   │   │       ├── translog-4.tlog
    │   │   │       └── translog.ckp
    │   │   ├── 4
    │   │   │   ├── index
    │   │   │   │   ├── _0.cfe
    │   │   │   │   ├── _0.cfs
    │   │   │   │   ├── _0.si
    │   │   │   │   ├── _1.cfe
    │   │   │   │   ├── _1.cfs
    │   │   │   │   ├── _1.si
    │   │   │   │   ├── segments_4
    │   │   │   │   └── write.lock
    │   │   │   ├── _state
    │   │   │   │   └── state-1.st
    │   │   │   └── translog
    │   │   │       ├── translog-1.ckp
    │   │   │       ├── translog-1.tlog
    │   │   │       ├── translog-2.ckp
    │   │   │       ├── translog-2.tlog
    │   │   │       ├── translog-3.ckp
    │   │   │       ├── translog-3.tlog
    │   │   │       ├── translog-4.tlog
    │   │   │       └── translog.ckp
    │   │   └── _state
    │   │       └── state-9.st
    │   └── M7yC3_RVS86dex_vcGbHZw
    │       ├── 0
    │       │   ├── index
    │       │   │   ├── _0.cfe
    │       │   │   ├── _0.cfs
    │       │   │   ├── _0.si
    │       │   │   ├── _2.cfe
    │       │   │   ├── _2.cfs
    │       │   │   ├── _2.si
    │       │   │   ├── _4.cfe
    │       │   │   ├── _4.cfs
    │       │   │   ├── _4.si
    │       │   │   ├── _a.cfe
    │       │   │   ├── _a.cfs
    │       │   │   ├── _a.si
    │       │   │   ├── segments_6
    │       │   │   └── write.lock
    │       │   ├── _state
    │       │   │   └── state-1.st
    │       │   └── translog
    │       │       ├── translog-1.ckp
    │       │       ├── translog-1.tlog
    │       │       ├── translog-2.ckp
    │       │       ├── translog-2.tlog
    │       │       ├── translog-3.ckp
    │       │       ├── translog-3.tlog
    │       │       ├── translog-4.ckp
    │       │       ├── translog-4.tlog
    │       │       ├── translog-5.tlog
    │       │       └── translog.ckp
    │       └── _state
    │           └── state-7.st
    ├── node.lock
    └── _state
        ├── global-15.st
        └── node-1.st

31 directories, 103 files
Start the kibana service
Set up the kibana configuration file
# cat kibana.yml
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://192.168.1.227:9200"]
kibana.index: ".kibana"
Start the kibana container
docker run -d --name kibana -p 5601:5601 -v /nas/nas/scripts/docker_es_kibana/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml docker.elastic.co/kibana/kibana:6.6.2
Parameter breakdown
docker run -d                           # run detached in the background
--name kibana                           # container name
-p 5601:5601                            # port mapped to the host
-v /nas/nas/scripts/docker_es_kibana/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml   # mount the config file
docker.elastic.co/kibana/kibana:6.6.2   # image to start from
Check that it is running
Test access from the web UI
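Open http://192.168.1.227:5601 in a browser; as a rough liveness check, the container state and the kibana status endpoint can also be probed from the shell:
# docker ps --filter name=kibana
# curl -s http://192.168.1.227:5601/api/status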
Building an ELK log collection system with k8s
For convenience, this k8s deployment does not mount persistent storage, and images are pulled from a private harbor registry.
Create the elasticsearch yaml manifest, deployed as a StatefulSet
# cat es-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app: es
  name: es
spec:
  replicas: 1
  serviceName: "es"
  selector:
    matchLabels:
      app: es
  template:
    metadata:
      labels:
        app: es
    spec:
      imagePullSecrets:
      - name: registry-pull-secret
      containers:
      - image: 192.168.1.61/project/elasticsearch:6.6.2
        imagePullPolicy: Always
        name: elasticsearch
Create it
kubectl apply -f es-statefulset.yaml
Check the pod
# kubectl get pod -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP            NODE           NOMINATED NODE   READINESS GATES
es-0                     1/1     Running   0          16m   172.17.84.2   192.168.1.66   <none>           <none>
nginx-86c57db685-lfv7n   1/1     Running   1          16d   172.17.21.2   192.168.1.65   <none>           <none>
From a node, you can test access using the pod IP
# curl 172.17.84.2:9200
{
  "name" : "Fhkl0a-",
  "cluster_name" : "docker-cluster",
  "cluster_uuid" : "S2-e82d4TQ-Up4qVR4k-6g",
  "version" : {
    "number" : "6.6.2",
    "build_flavor" : "default",
    "build_type" : "tar",
    "build_hash" : "3bd3e59",
    "build_date" : "2019-03-06T15:16:26.864148Z",
    "build_snapshot" : false,
    "lucene_version" : "7.6.0",
    "minimum_wire_compatibility_version" : "5.6.0",
    "minimum_index_compatibility_version" : "5.0.0"
  },
  "tagline" : "You Know, for Search"
}
Create the internal service for the StatefulSet
Generate the ClusterIP service yaml manifest with a command
kubectl create service clusterip es --clusterip="None" --tcp=9200:9200 --tcp=9300:9300 --dry-run -o yaml >es-cluster-svc.yaml
Command breakdown
kubectl create service clusterip es   # create a ClusterIP service named es
--clusterip="None"                    # headless service: no cluster IP, accessed internally by DNS name
--tcp=9200:9200 --tcp=9300:9300       # exposed ports
--dry-run                             # validate only; do not actually create the resource
-o yaml                               # output the yaml manifest
The generated configuration file is as follows
# cat es-cluster-svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: es
  name: es
spec:
  clusterIP: None
  ports:
  - name: 9200-9200
    port: 9200
    protocol: TCP
    targetPort: 9200
  - name: 9300-9300
    port: 9300
    protocol: TCP
    targetPort: 9300
  selector:
    app: es
  type: ClusterIP
Apply it
kubectl apply -f es-cluster-svc.yaml
Verify
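A headless service reports its CLUSTER-IP as None, and the endpoints should point at the es-0 pod:
# kubectl get svc es
# kubectl get ep es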
Start a busybox container to test that internal DNS access works, as shown after the command below.
The pod DNS name has the form ${podname}.${serviceName}.${namespace} (the cluster DNS search domains expand this to the full ...svc.cluster.local name).
Here the pod name is es-0, the serviceName is es, and the namespace is the default namespace default, giving es-0.es.default.
Note: the Service name must match the StatefulSet's serviceName field, otherwise the pod DNS name will not resolve.
kubectl run -i -t --rm busybox --image=busybox:1.28.4 --restart=Never
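Inside the busybox shell, the name built from the format above can be resolved and queried directly (a minimal check; es-0.es.default comes from the pod and service names above):
/ # nslookup es-0.es.default
/ # wget -qO- http://es-0.es.default:9200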
To make elasticsearch reachable from outside the cluster, create a NodePort service.
Generate the yaml manifest with a command
kubectl create service nodeport es --tcp=9200:9200 --dry-run -o yaml >es-nodeport-svc.yaml
Command breakdown
kubectl create service nodeport es   # create a NodePort service; the selector label and default service name are both es
--tcp=9200:9200                      # port:targetPort; port is the service port, targetPort is the pod (container) port
--dry-run                            # validate only; do not actually create the resource
-o yaml                              # output the yaml manifest
>es-nodeport-svc.yaml                # redirect the output to a file
The generated file is
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: es
  name: es
spec:
  ports:
  - name: 9200-9200
    port: 9200
    protocol: TCP
    targetPort: 9200
  selector:
    app: es
  type: NodePort
status:
  loadBalancer: {}
Edit it to drop the unnecessary fields. Note that the pinned nodePort of 9200 lies outside the default service-node-port-range (30000-32767), so this only works on a cluster where that range has been widened. After editing:
# cat es-nodeport-svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: es
  name: es-nodeport-svc
spec:
  ports:
  - name: 9200-9200
    port: 9200
    protocol: TCP
    targetPort: 9200
    nodePort: 9200
  selector:
    app: es
  type: NodePort
Apply it
kubectl apply -f es-nodeport-svc.yaml
Verify
The service can now be reached via any node IP plus the port.
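For example, using the node IP from the pod listing above:
# curl 192.168.1.66:9200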
Create the kibana service
Create the kibana configuration file; note that it connects to elasticsearch by its internal DNS name
# cat kibana.yml
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.url: "http://es-0.es.default:9200"
kibana.index: ".kibana"
Create a configmap from the kibana configuration
kubectl create configmap kibana-config --from-file=kibana.yml
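To confirm the configmap captured the file:
# kubectl get configmap kibana-config -o yaml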
Create the kibana deployment yaml manifest
# cat kibana-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: kibana
  name: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      imagePullSecrets:
      - name: registry-pull-secret
      containers:
      - image: 192.168.1.61/project/kibana:6.6.2
        imagePullPolicy: Always
        name: kibana
        volumeMounts:
        - name: kibana-config
          mountPath: /usr/share/kibana/config/kibana.yml
          subPath: kibana.yml
      volumes:
      - name: kibana-config
        configMap:
          name: kibana-config
Apply it
kubectl apply -f kibana-deployment.yaml
Verify
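For example, checking the pod by its label and tailing the deployment's logs:
# kubectl get pod -l app=kibana -o wide
# kubectl logs deployment/kibana --tail=20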
Generate the kibana service yaml manifest with a command
kubectl create service nodeport kibana --tcp=5601:5601 --dry-run -o yaml>kibana-nodeport-svc.yaml
Edit it as before
# cat kibana-nodeport-svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: kibana
  name: kibana-nodeport-svc
spec:
  ports:
  - name: 5601-5601
    port: 5601
    protocol: TCP
    targetPort: 5601
    nodePort: 5601
  selector:
    app: kibana
  type: NodePort
Apply it
kubectl apply -f kibana-nodeport-svc.yaml
Verify
# kubectl get svc kibana-nodeport-svc
NAME                  TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
kibana-nodeport-svc   NodePort   10.0.0.108   <none>        5601:5601/TCP   2m40s
[root@k8s-master01 kibana]# kubectl get ep kibana-nodeport-svc
NAME                  ENDPOINTS          AGE
kibana-nodeport-svc   172.17.84.4:5601   2m45s
Access the web UI at any node IP plus the port.