helm3部署es kubernetes高可用集群-實踐記錄(4)


helm使用hostpath部署es

環境匯總:
節點數:3 nodes 91/92/93
k8s: v1.20.5
helm: v3.2.0
elasticsearch: 6.8.18

1.創建命名空間(namespace)&前期准備

參考:快速搭建Kubernetes高可用集群七 ELKSTACK 部署 https://www.longger.net/article/33179.html

# Create the "elk" namespace that all ELK resources will live in
kubectl create ns elk

# Pull the es image; a certificate will be generated from it below
docker pull elasticsearch:6.8.18

## Generate certificates
# Run a throwaway container that generates the CA and the node certificate
docker run --name elastic-charts-certs -i -w /app elasticsearch:6.8.18 /bin/sh -c  \
  "elasticsearch-certutil ca --out /app/elastic-stack-ca.p12 --pass '' && \
    elasticsearch-certutil cert --name security-master --dns \
    security-master --ca /app/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /app/elastic-certificates.p12"

# Copy the generated certificate out of the container into the current directory
docker cp elastic-charts-certs:/app/elastic-certificates.p12 ./ 

# Remove the temporary container
docker rm -f elastic-charts-certs

# Extract the contents of the PKCS#12 keystore and write them to a PEM file
openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem

## Store the certificates as Kubernetes secrets
kubectl create secret -n elk generic elastic-certificates --from-file=elastic-certificates.p12
kubectl -n elk create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem

# Set the cluster username/password; changing the username is not recommended
kubectl create secret -n elk generic elastic-credentials \
  --from-literal=username=elastic --from-literal=password=elastic123456

# Verify the generated certificate and credential secrets
kubectl get secret -n elk

2.helm拉取&更新repo

# Add the official Elastic Helm repository and refresh the local chart index
helm repo add elastic https://helm.elastic.co 
helm repo update

3.提前pull鏡像

# elasticsearch: pull from Docker Hub, then retag to the name the chart expects
docker pull elasticsearch:6.8.18

docker tag elasticsearch:6.8.18 docker.elastic.co/elasticsearch/elasticsearch:6.8.18

# kibana: same pull-and-retag step
docker pull kibana:6.8.18

docker tag kibana:6.8.18 docker.elastic.co/kibana/kibana:6.8.18

4.使用hostpath作為local storage的存儲卷

需先創建好pv與storageclass:master與data節點各自創建自己的pv

參考:PV、PVC、StorageClass講解 https://www.cnblogs.com/rexcheny/p/10925464.html

pv-master節點

# local-pv-master1.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: es-master-pv1 # create one PV per master node (pv1/pv2/pv3 ...)
spec:
  capacity:
    storage: 10Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce # local volumes are single-node read-write
  # Retain, not Delete: this PV is statically provisioned, so no provisioner
  # exists that could delete it — with Delete the PV would get stuck in a
  # Failed state once its PVC is removed.
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local: # "local" volume type: node-local path, requires nodeAffinity below
    path: /mnt/data/master/vol01  # actual directory on the node; must already exist
  nodeAffinity: # pin the PV to the node that owns the path above
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node1 # the node that has the /mnt/data/master/vol01 directory

pv-data節點

# local-pv-data1.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: es-data-pv1 # create one PV per data node (pv1/pv2/pv3 ...)
spec:
  capacity:
    storage: 100Gi # size per environment; 100G used for this test setup
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce # local volumes are single-node read-write
  # Retain, not Delete: statically provisioned local PVs have no provisioner
  # able to delete them, so Delete would leave the PV stuck after PVC removal.
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage # must match the StorageClass name
  local: # "local" volume type
    path: /mnt/data  # actual directory on the node; must already exist
  nodeAffinity: # pin the PV to the node that owns the path above
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname # check labels with: kubectl get no --show-labels
          operator: In
          values:
          - node1 # the node that has the /mnt/data directory

storageclass

# local-storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage # must match storageClassName in the PVs and the chart values
provisioner: kubernetes.io/no-provisioner # static provisioning only — local volumes have no dynamic provisioner
volumeBindingMode: WaitForFirstConsumer

設置pv & storageclass

# NOTE: PersistentVolumes and StorageClasses are cluster-scoped resources,
# so the -n elk flag is effectively ignored for these manifests.
kubectl apply -f local-pv-master1[2|3].yaml -n elk # apply each master PV manifest
kubectl apply -f local-pv-data1[2|3].yaml -n elk # apply each data PV manifest
kubectl apply -f local-storageclass.yaml -n elk # apply the StorageClass

5.准備helm安裝節點的values yaml

參考:Helm 安裝 ElasticSearch & Kibana 7.x 版本 http://www.mydlq.club/article/13

es-master

---
# Cluster name; shared by the master/data/client releases
clusterName: "helm"
# Node group this release belongs to
nodeGroup: "master"
# Master service address — this release IS the master group, so leave it empty
masterService: ""
# Node roles:
roles:
  master: "true"
  ingest: "false"
  data: "false"
# Replica count; for a production master group: node >= 3 and node mod 2 == 1
replicas: 1 # set per environment; 1 for this test, upstream default is 3
minimumMasterNodes: 1
esMajorVersion: ""
esConfig:
  # elasticsearch.yml contents: transport TLS, monitoring switch and certificate paths
  elasticsearch.yml: |
    xpack:
      security:
        enabled: true
        transport:
          ssl:
            enabled: true
            verification_mode: certificate
            keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
            truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
      monitoring:
        collection:
          enabled: true
# Inject the elastic account credentials from the pre-created secret
extraEnvs:
  - name: ELASTIC_USERNAME
    valueFrom:
      secretKeyRef:
        name: elastic-credentials
        key: username
  - name: ELASTIC_PASSWORD
    valueFrom:
      secretKeyRef:
        name: elastic-credentials
        key: password
envFrom: []
# Mount the transport certificates into the container
secretMounts:
  - name: elastic-certificates
    secretName: elastic-certificates
    path: /usr/share/elasticsearch/config/certs
# Image source (the original author hosted a slightly modified image in a private harbor)
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "6.8.18"
imagePullPolicy: "IfNotPresent"
imagePullSecrets:
  - name: registry-secret
podAnnotations: {}
labels: {}
# JVM heap for ES
esJavaOpts: "-Xmx512m -Xms512m" # keep the heap modest for small machines; if pods stay unready, try 512m
# Resources required to run ES
resources:
  requests:
    cpu: "500m"
    memory: "1Gi"
  limits:
    cpu: "500m"
    memory: "1Gi"
initResources: {}
sidecarResources: {}
# Bind address; without this the service may fail to start
networkHost: "0.0.0.0"
# ES storage configuration
volumeClaimTemplate:
  storageClassName: "local-storage" # must match the StorageClass created earlier
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 5Gi # must fit within the PV capacity
# PVC switch
persistence:
  enabled: true
  labels:
    enabled: false
  annotations: {}
# rbac left at chart defaults (not investigated in detail)
rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
# Schedule the pod onto a specific node
nodeSelector:
  kubernetes.io/hostname: node1
# Tolerate all taints — needed when the cluster is small and pods must land on master nodes
tolerations:
  - operator: "Exists"

es-data

---
# Cluster name — must match the master release
clusterName: "helm"
# Node group this release belongs to
nodeGroup: "data"
# Master service name (clusterName + "-" + master nodeGroup)
masterService: "helm-master"
# Node roles; "true" enables the role — data nodes do not need the master role
roles:
  master: "false"
  ingest: "true"
  data: "true"
# Replica count
replicas: 1  # set per environment; 1 for this test, upstream default is 3
esMajorVersion: "6"
esConfig:
  # elasticsearch.yml contents — same as the master values
  elasticsearch.yml: |
    xpack:
      security:
        enabled: true
        transport:
          ssl:
            enabled: true
            verification_mode: certificate
            keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
            truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
      monitoring:
        collection:
          enabled: true
# Credentials from the secret — same as the master values
extraEnvs:
  - name: ELASTIC_USERNAME
    valueFrom:
      secretKeyRef:
        name: elastic-credentials
        key: username
  - name: ELASTIC_PASSWORD
    valueFrom:
      secretKeyRef:
        name: elastic-credentials
        key: password
envFrom: []
# Certificate mount — same as the master values
secretMounts:
  - name: elastic-certificates
    secretName: elastic-certificates
    path: /usr/share/elasticsearch/config/certs
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "6.8.18"
imagePullPolicy: "IfNotPresent"
imagePullSecrets:
  - name: registry-secret
podAnnotations: {}
labels: {}
# JVM heap for the data node; raise for real workloads
esJavaOpts: "-Xmx512m -Xms512m"

# Resources required to run ES
resources:
  requests:
    cpu: "1000m"
    memory: "1Gi"
  limits:
    cpu: "1000m"
    memory: "1Gi"
initResources: {}
sidecarResources: {}
# Bind address; without this the service may fail to start
networkHost: "0.0.0.0"

# ES data storage
volumeClaimTemplate:
  storageClassName: "local-storage"
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 10Gi

# PVC switch
persistence:
  enabled: true
  labels:
    enabled: false
  annotations: {}

# rbac left at chart defaults (not investigated in detail)
rbac:
  create: false
  serviceAccountAnnotations: {}
  serviceAccountName: ""
# Node selection (disabled here; pods go wherever the scheduler puts them)
# nodeSelector:
#   elk-rolse: data
# Tolerate all taints — needed when the cluster is small and pods must land on master nodes
tolerations:
  - operator: "Exists"

es-client

# ============ Cluster ============
## Cluster name — must match the master release
clusterName: "helm"
## Node group this release belongs to
nodeGroup: "client"
## Node roles — a coordinating-only client node has all roles disabled
roles:
  master: "false"
  ingest: "false"
  data: "false"
# Master service name
masterService: "helm-master"

# ============ Image ============
## Image and tag
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "6.8.18"
## Replica count
replicas: 1

# ============ Resources ============
## JVM options
esJavaOpts: "-Xmx512m -Xms512m"
## Deployment resources (size generously in production)
resources:
  requests:
    cpu: "1000m"
    memory: "2Gi"
  limits:
    cpu: "1000m"
    memory: "2Gi"
## Persistent volume — client nodes are stateless, so disabled
persistence:
  enabled: false

# ============ Security ============
## Protocol, may be http or https
protocol: http
## Mount the certificates created earlier
secretMounts:
  - name: elastic-certificates
    secretName: elastic-certificates
    path: /usr/share/elasticsearch/config/certs
## esConfig lets you add custom files under /usr/share/elasticsearch/config/, e.g. elasticsearch.yml
## the x-pack plugin ships with ES by default (partly free); security is configured here
## the commented lines below enable https — if enabled, also set the protocol value above to https
esConfig:
  elasticsearch.yml: |
    xpack.security.enabled: true
    xpack.security.transport.ssl.enabled: true
    xpack.security.transport.ssl.verification_mode: certificate
    xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
    xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
    # xpack.security.http.ssl.enabled: true
    # xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
    # xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12    
## Environment variables referencing the username/password secret created earlier
extraEnvs:
  - name: ELASTIC_USERNAME
    valueFrom:
      secretKeyRef:
        name: elastic-credentials # secret must exist in the release's namespace
        key: username
  - name: ELASTIC_PASSWORD
    valueFrom:
      secretKeyRef:
        name: elastic-credentials
        key: password

# ============ Service ============
service:
  type: NodePort
  nodePort: "30200"

6.helm install pod

執行順序:master>data>client
# master nodes (install first)
helm install es-m -nelk -f es-master.yaml elastic/elasticsearch --version 6.8.18 --debug
# data nodes
helm install es-d -nelk -f es-data.yaml elastic/elasticsearch --version 6.8.18 --debug
# client nodes (install last)
helm install es-c -nelk -f es-client.yaml elastic/elasticsearch --version 6.8.18 --debug

7.查看狀態及測試功能

# Watch the pods come up
watch kubectl get po -n elk -o wide
---
NAME            READY   STATUS    RESTARTS   AGE    IP             NODE    NOMINATED NODE   READINESS GATES
helm-client-0   1/1     Running   0          6h2m   10.233.96.81   node2   <none>           <none>
helm-data-0     1/1     Running   0          45m    10.233.96.84   node2   <none>           <none>
helm-master-0   1/1     Running   0          45m    10.233.90.89   node1   <none>           <none>
---

# If a pod stays Pending or otherwise unhealthy, inspect the reason
kubectl describe po helm-data-0 -nelk # -n elk also works

# View the pod's logs
kubectl logs  helm-master-0 -nelk

8.問題匯總

8.1 pv pvc sc的設置

關於存儲卷的分類:
- nfs
- ceph
- local volume

8.2 節點親和性問題

節點的親和性與反親和性

8.3 es起來了但不是ready狀態-jvm內存問題

openjdk提示useavx=2不支持本cpu問題

9.部署kibana

創建kibana-values.yaml

# ============ Image ============
## Image and tag
image: "docker.elastic.co/kibana/kibana"
imageTag: "6.8.18"

## ElasticSearch endpoint — service name follows the cluster naming used above
elasticsearchHosts: "http://helm-client:9200" # clusterName + "-" + client nodeGroup

# ============ Environment ============
## Inject the username/password secret created earlier
extraEnvs:
  - name: 'ELASTICSEARCH_USERNAME'
    valueFrom:
      secretKeyRef:
        name: elastic-credentials
        key: username
  - name: 'ELASTICSEARCH_PASSWORD'
    valueFrom:
      secretKeyRef:
        name: elastic-credentials
        key: password

# ============ Resources ============
resources:
  requests:
    cpu: "500m"
    memory: "1Gi"
  limits:
    cpu: "500m"
    memory: "1Gi"

# ============ Kibana settings ============
## Locale setting so the Kibana UI is displayed in Chinese
kibanaConfig:
  kibana.yml: |
        i18n.locale: "zh-CN"

# ============ Service ============
service:
  type: NodePort
  nodePort: "30601"

helm創建kibana應用:

# Install the kibana release
helm install -nelk kibana elastic/kibana -f kibana-values.yaml --version 6.8.18 --debug

# View the logs
kubectl logs kibana-kibana-875887d58-846nw -nelk

# Describe the pod for startup details
kubectl describe po kibana-kibana-875887d58-846nw -nelk

驗證kibana連接es成功,登錄時輸入自己設置的user/password即可

驗證連接節點:

插入數據並驗證:

10.查看helm已安裝的release

使用命令helm list [-n mynamespace]


可以看到部署的4個release,分別是es-master/data/client, kibana已全部部署成功。

11.部署filebeat

部署的配置文件:

# Image to use
image: "docker.elastic.co/beats/filebeat"
imageTag: "6.8.18"
# Filebeat configuration
filebeatConfig:
  filebeat.yml: |
    filebeat.inputs:
    - type: docker
      containers.ids:
      - '*'
      processors:
      - add_kubernetes_metadata:
          in_cluster: true
    output.elasticsearch:
      # Credentials are expanded by Beats from the env vars injected below,
      # so the secret stays the single source of truth instead of
      # hard-coding the username/password here in plain text.
      username: '${ELASTICSEARCH_USERNAME}'
      password: '${ELASTICSEARCH_PASSWORD}'
      # elasticsearch hosts
      hosts: ["helm-client:9200"]
# Environment variables sourced from the elastic-credentials secret
extraEnvs:
  - name: 'ELASTICSEARCH_USERNAME'
    valueFrom:
      secretKeyRef:
        name: elastic-credentials
        key: username
  - name: 'ELASTICSEARCH_PASSWORD'
    valueFrom:
      secretKeyRef:
        name: elastic-credentials
        key: password

部署filebeat:
# Install the filebeat release (DaemonSet — one pod per node by default)
helm install filebeat -f filebeat-values.yaml -nelk elastic/filebeat --version 6.8.18 --debug
此filebeat以daemonset形式部署,因此在默認設置下,每個節點都會運行一個filebeat實例。

查看filebeat的pod, 可以看到已經ready狀態:

進一步查看kibana上的filebeat日志:


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM