Apollo wrap-up
Course outline
1. Overview of Prometheus for container-cloud monitoring
https://prometheus.io/docs/introduction/overview/ #official documentation
https://github.com/prometheus/prometheus #source code on GitHub
How it works
An exporter acts as middleware between the monitored target and Prometheus.
The Pushgateway lets jobs push their data to it actively instead of waiting to be pulled, which is quicker.
Service discovery (the hard part); targets can also be declared through static configuration, as sketched below.
Grafana is dedicated to presenting the data.
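A minimal sketch of the two target-discovery styles (the job names and the sample address are placeholders; the full configuration later in these notes follows the same pattern):
scrape_configs:
  - job_name: 'static-example'          # static configuration: targets listed by hand
    static_configs:
      - targets: ['10.4.7.21:9100']     # placeholder address
  - job_name: 'sd-example'              # service discovery: targets found via the Kubernetes API
    kubernetes_sd_configs:
      - role: pod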
Architecture overview
Prometheus compared with Zabbix
2. Essential exporters
Deploy kube-state-metrics
Monitors basic Kubernetes object state: how many Deployments and Services exist, which pods are running, and so on, exposing this cluster metadata as metrics.
Pull and push the image
On the ops host HDSS7-200.host.com:
[root@hdss7-200 ~]# docker pull quay.io/coreos/kube-state-metrics:v1.5.0
v1.5.0: Pulling from coreos/kube-state-metrics
cd784148e348: Pull complete
f622528a393e: Pull complete
Digest: sha256:b7a3143bd1eb7130759c9259073b9f239d0eeda09f5210f1cd31f1a530599ea1
Status: Downloaded newer image for quay.io/coreos/kube-state-metrics:v1.5.0
[root@hdss7-200 ~]# docker tag 91599517197a harbor.od.com/public/kube-state-metrics:v1.5.0
[root@hdss7-200 ~]# docker push harbor.od.com/public/kube-state-metrics:v1.5.0
Resource manifest: RBAC
[root@hdss7-200 ~]# mkdir /data/k8s-yaml/kubu-state-metrics
[root@hdss7-200 kubu-state-metrics]# cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
name: kube-state-metrics
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
name: kube-state-metrics
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
- nodes
- pods
- services
- resourcequotas
- replicationcontrollers
- limitranges
- persistentvolumeclaims
- persistentvolumes
- namespaces
- endpoints
verbs:
- list
- watch
- apiGroups:
- extensions
resources:
- daemonsets
- deployments
- replicasets
verbs:
- list
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- list
- watch
- apiGroups:
- batch
resources:
- cronjobs
- jobs
verbs:
- list
- watch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
name: kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-state-metrics
subjects:
- kind: ServiceAccount
name: kube-state-metrics
namespace: kube-system
Resource manifest: Deployment
[root@hdss7-200 kubu-state-metrics]# cat dp.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "2"
labels:
grafanak8sapp: "true"
app: kube-state-metrics
name: kube-state-metrics
namespace: kube-system
spec:
selector:
matchLabels:
grafanak8sapp: "true"
app: kube-state-metrics
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
grafanak8sapp: "true"
app: kube-state-metrics
spec:
containers:
- image: harbor.od.com/public/kube-state-metrics:v1.5.0
name: kube-state-metrics
ports:
- containerPort: 8080
name: http-metrics
protocol: TCP
readinessProbe: #readiness probe: pods that are not ready receive no traffic; it keeps checking the pod so a dead pod does not cause 404s
failureThreshold: 3
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
imagePullPolicy: IfNotPresent
imagePullSecrets:
- name: harbor
restartPolicy: Always
serviceAccount: kube-state-metrics
serviceAccountName: kube-state-metrics
Apply the manifests
kubectl apply -f http://k8s-yaml.od.com/kubu-state-metrics/rbac.yaml
kubectl apply -f http://k8s-yaml.od.com/kubu-state-metrics/dp.yaml
Verify that it started correctly
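A quick check, assuming the pod is up (the pod name and IP come from kubectl; /healthz on port 8080 matches the readinessProbe above):
[root@hdss7-21 ~]# kubectl get pods -n kube-system -o wide | grep kube-state-metrics
[root@hdss7-21 ~]# curl <POD_IP>:8080/healthz    # substitute the pod IP reported above; an HTTP 200 means it is serving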
Deploy node-exporter
# Purpose: monitor compute-node (host) resources such as CPU and memory
Pull and push the image
On the ops host HDSS7-200.host.com:
[root@hdss7-200 kubu-state-metrics]# docker pull prom/node-exporter:v0.15.0
docker tag b3e7f67a1480 harbor.od.com/public/node-exporter:v0.15.0
[root@hdss7-200 kubu-state-metrics]# docker push harbor.od.com/public/node-exporter:v0.15.0
[root@hdss7-200 kubu-state-metrics]# mkdir /data/k8s-yaml/node-exporter/
Resource manifest: DaemonSet
A DaemonSet runs one pod on every compute node.
[root@hdss7-200 node-exporter]# vi ds.yaml
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: node-exporter
namespace: kube-system
labels:
daemon: "node-exporter"
grafanak8sapp: "true"
spec:
selector:
matchLabels:
daemon: "node-exporter"
grafanak8sapp: "true"
template:
metadata:
name: node-exporter
labels:
daemon: "node-exporter"
grafanak8sapp: "true"
spec:
volumes:
- name: proc
hostPath:
path: /proc
type: ""
- name: sys
hostPath:
path: /sys
type: ""
containers:
- name: node-exporter
image: harbor.od.com/public/node-exporter:v0.15.0
args:
- --path.procfs=/host_proc
- --path.sysfs=/host_sys
ports:
- name: node-exporter
hostPort: 9100 #host port exposed on the node
containerPort: 9100
protocol: TCP
volumeMounts:
- name: sys
readOnly: true
mountPath: /host_sys
- name: proc
readOnly: true
mountPath: /host_proc
imagePullSecrets:
- name: harbor
restartPolicy: Always
hostNetwork: true
[root@hdss7-21 ~]# curl 10.4.7.21:9100/metrics #pull the host's resource-usage metrics
Confirm the installation succeeded
Apply the manifest
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/node-exporter/ds.yaml
Deploy cAdvisor
Monitors the resources used by pods: CPU, memory, and so on.
Mind the version: some versions have bugs with Grafana; the 0.28 release is recommended.
On the ops host HDSS7-200.host.com:
Pull and push the image
[root@hdss7-200 node-exporter]# docker pull google/cadvisor:v0.28.3
[root@hdss7-200 node-exporter]# docker tag 75f88e3ec333 harbor.od.com/public/cadvisor:v0.28.3
[root@hdss7-200 ~]# docker push !$
Resource manifest: DaemonSet
[root@hdss7-200 node-exporter]# mkdir /data/k8s-yaml/cadvisor
vi ds.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cadvisor
namespace: kube-system
labels:
app: cadvisor
spec:
selector:
matchLabels:
name: cadvisor
template:
metadata:
labels:
name: cadvisor
spec:
hostNetwork: true
tolerations: #tolerate the taint so this pod may be scheduled onto tainted (e.g. master) nodes; a toleration permits scheduling there, it does not force it
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: cadvisor
image: harbor.od.com/public/cadvisor:v0.28.3
imagePullPolicy: IfNotPresent
volumeMounts:
- name: rootfs
mountPath: /rootfs
readOnly: true
- name: var-run
mountPath: /var/run
- name: sys
mountPath: /sys
readOnly: true
- name: docker
mountPath: /var/lib/docker
readOnly: true
ports:
- name: http
containerPort: 4194
protocol: TCP
readinessProbe:
tcpSocket:
port: 4194
initialDelaySeconds: 5
periodSeconds: 10
args:
- --housekeeping_interval=10s
- --port=4194
terminationGracePeriodSeconds: 30
volumes:
- name: rootfs
hostPath:
path: /
- name: var-run
hostPath:
path: /var/run
- name: sys
hostPath:
path: /sys
- name: docker
hostPath:
path: /data/docker
Adjust the cgroup symlink on the compute nodes
On every compute node:
mount -o remount,rw /sys/fs/cgroup/ #remount read-write; no device argument is needed for a remount
ln -s /sys/fs/cgroup/cpu,cpuacct /sys/fs/cgroup/cpuacct,cpu #create a new symlink with the directory name the container expects
ll /sys/fs/cgroup/ | grep cpu
Apply the manifest
On any compute node:
kubectl apply -f http://k8s-yaml.od.com/cadvisor/ds.yaml
netstat -luntp|grep 4194 #check that the port exposed by the DaemonSet is listening
Tainted nodes
Add role labels
Labels can be used to filter certain nodes.
[root@hdss7-21 cert]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/master=
[root@hdss7-21 cert]# kubectl label node hdss7-21.host.com node-role.kubernetes.io/node=
[root@hdss7-22 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready master,node 15h v1.15.2
hdss7-22.host.com Ready master,node 12m v1.15.2
Three ways to influence Kubernetes scheduling (see the sketch after this list):
1. Taints and tolerations: a toleration declares whether a pod can tolerate a node's taint.
2. nodeName: run the pod on one specified node.
3. nodeSelector: use a label selector so the pod runs on a given class of nodes.
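A minimal pod-spec sketch of the last two methods (the node name is from this environment; the disktype label is purely hypothetical):
# pin the pod to one specific node:
spec:
  nodeName: hdss7-21.host.com
# or let a label selector choose a class of nodes:
spec:
  nodeSelector:
    disktype: ssd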
Taint a compute node
This works through the scheduler: instead of the default placement, scheduling onto the node is governed by rules declared in the manifests.
# taint node 21 so that the default scheduling rules no longer place pods there
# this taint's key is node-role.kubernetes.io/master, its value is master, and the effect is NoSchedule
[root@hdss7-21 ~]# kubectl taint node hdss7-21.host.com node-role.kubernetes.io/master=master:NoSchedule
node/hdss7-21.host.com tainted
Taint usage summary
A taint is a key/value pair; the value may be empty, in which case matching is done on the key alone.
Taint: set on a node (the compute node carries the taint).
Toleration: declared in the pod manifest; when it matches the node's taint, the pod is allowed to run on that tainted node.
Remove a taint
[root@hdss7-21 ~]# kubectl taint node hdss7-21.host.com node-role.kubernetes.io/master- #remove the taint; note the trailing '-' after master
Appending '-' to the key is all it takes to remove a taint.
More on taints and tolerations
#after creating 2 pods across 2 nodes, scale down to 1, then taint node 21 (which no longer runs the pod) to test
#add a taint on node 21
#the taint's key is quedian and its value buxijiao; pods that do not tolerate this key/value get the NoSchedule effect
kubectl taint node hdss7-21.host.com quedian=buxijiao:NoSchedule
kubectl describe node hdss7-21.host.com
Configuring the toleration in the pod manifest
#it goes above containers, under the pod template's spec (the second spec); see the sketch below
Once the taint is tolerated, the pod can run on the tainted node again.
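A sketch of where the toleration sits in a Deployment, matching the quedian=buxijiao taint added above (the container name is illustrative):
spec:
  template:
    spec:
      tolerations:                  # above containers, under the pod template's spec
      - key: quedian
        operator: Equal
        value: buxijiao
        effect: NoSchedule
      containers:
      - name: dubbo-demo-service    # the rest of the container spec stays unchanged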
The two taint effects used here:
kubectl taint node hdss7-21.host.com quedian=buxijiao:NoSchedule #pods that do not tolerate the taint are not scheduled onto this node
kubectl taint node hdss7-21.host.com quedian=buxijiao:NoExecute #pods that do not tolerate the taint are not scheduled here, and any already running on the node are evicted
# even with a matching toleration, if other healthy nodes are available the scheduler will generally not place new pods on a NoExecute-tainted node
To remove a taint, use 'key-'; the value can be omitted, since taints are distinguished mainly by their key.
#node 21 now carries two taints ('buxijiao', no foot-washing, and 'buxizao', no bathing); a pod must tolerate both of them to be scheduled there, tolerating only one is not enough
Use cases
1. Separate I/O-intensive workloads onto nodes with the appropriate disk type.
2. Give memory-hungry workloads such as Prometheus a dedicated node.
3. When a compute node has to be taken offline for maintenance, taint it so its pods are evicted:
kubectl taint node hdss7-21.host.com key=broken:NoExecute
Deploy blackbox-exporter
The most commonly used monitoring component: it checks the liveness of business containers; Prometheus calls blackbox-exporter with parameters describing each target.
It probes service liveness: HTTP when the service exposes an HTTP endpoint, TCP otherwise; only these two probe types are used.
For blackbox to probe a service, the pod manifest must carry matching annotations (or labels) that Prometheus uses to pick the target up.
#TCP probe target
#HTTP probe target
Scaling down does not raise an alert, because the annotation lives in the pod manifest: when the pod is scaled away, the annotation (and with it the target) disappears too.
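What Prometheus does can be reproduced by hand: it calls blackbox-exporter's /probe endpoint with a module name (matching the ConfigMap below) and a target; the target addresses here are placeholders:
# TCP probe of a hypothetical pod address
curl "http://blackbox.od.com/probe?module=tcp_connect&target=172.7.21.5:20880"
# HTTP probe of a hypothetical health endpoint; the response includes probe_success 1 on success
curl "http://blackbox.od.com/probe?module=http_2xx&target=172.7.21.6:8080/hello?name=health"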
Pull and push the image
On the ops host HDSS7-200.host.com:
[root@hdss7-200 blackbox-exporter]# docker pull prom/blackbox-exporter:v0.15.1
[root@hdss7-200 blackbox-exporter]# docker tag 81b70b6158be harbor.od.com/public/blackbox-exporter:v0.15.1
[root@hdss7-200 ~]# docker push harbor.od.com/public/blackbox-exporter:v0.15.1
Prepare the resource manifests
• ConfigMap
• Deployment
• Service
• Ingress
mkdir /data/k8s-yaml/blackbox-exporter/
vi /data/k8s-yaml/blackbox-exporter/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app: blackbox-exporter
name: blackbox-exporter
namespace: kube-system
data:
blackbox.yml: |-
modules:
http_2xx:
prober: http
timeout: 2s
http:
valid_http_versions: ["HTTP/1.1", "HTTP/2"]
valid_status_codes: [200,301,302]
method: GET
preferred_ip_protocol: "ip4"
tcp_connect:
prober: tcp
timeout: 2s
vi /data/k8s-yaml/blackbox-exporter/deployment.yaml
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: blackbox-exporter
namespace: kube-system
labels:
app: blackbox-exporter
annotations:
deployment.kubernetes.io/revision: 1
spec:
replicas: 1
selector:
matchLabels:
app: blackbox-exporter
template:
metadata:
labels:
app: blackbox-exporter
spec:
volumes:
- name: config
configMap:
name: blackbox-exporter
defaultMode: 420
containers:
- name: blackbox-exporter
image: harbor.od.com/public/blackbox-exporter:v0.15.1
args:
- --config.file=/etc/blackbox_exporter/blackbox.yml
- --log.level=debug
- --web.listen-address=:9115
ports:
- name: blackbox-port
containerPort: 9115
protocol: TCP
resources:
limits:
cpu: 200m
memory: 256Mi
requests:
cpu: 100m
memory: 50Mi
volumeMounts:
- name: config
mountPath: /etc/blackbox_exporter
readinessProbe: #readiness probe: pods that are not alive/ready receive no traffic
tcpSocket:
port: 9115
initialDelaySeconds: 5
timeoutSeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
imagePullPolicy: IfNotPresent
imagePullSecrets:
- name: harbor
restartPolicy: Always
vi /data/k8s-yaml/blackbox-exporter/service.yaml
kind: Service
apiVersion: v1
metadata:
name: blackbox-exporter
namespace: kube-system
spec:
selector:
app: blackbox-exporter
ports:
- protocol: TCP
port: 9115
name: http
vi /data/k8s-yaml/blackbox-exporter/ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: blackbox-exporter
namespace: kube-system
spec:
rules:
- host: blackbox.od.com
http:
paths:
- backend:
serviceName: blackbox-exporter
servicePort: 9115
Add the DNS record
On HDSS7-11.host.com
Edit /var/named/od.com.zone and add:
blackbox A 10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-21 ~]# dig -t A blackbox.od.com @192.168.0.2 +short
10.4.7.10
Apply the manifests
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/configmap.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/deployment.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/service.yaml
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/blackbox-exporter/ingress.yaml
Access in a browser
http://blackbox.od.com
3. Deploy Prometheus and walk through its configuration
Deploy Prometheus
# docker pull prom/prometheus:v2.14.0
# docker tag 7317640d555e harbor.od.com/infra/prometheus:v2.14.0
# docker push harbor.od.com/infra/prometheus:v2.14.0
[root@hdss7-200 ~]# mkdir /data/k8s-yaml/prometheus
[root@hdss7-200 ~]# cd /data/k8s-yaml/prometheus
Resource manifest: RBAC
[root@hdss7-200 prometheus]# cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
name: prometheus
namespace: infra
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
name: prometheus
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/metrics
- services
- endpoints
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- nonResourceURLs:
- /metrics
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: infra
Resource manifest: Deployment
Add --web.enable-lifecycle to enable remote hot-reloading of the configuration file.
Trigger a reload with: curl -X POST http://localhost:9090/-/reload
storage.tsdb.min-block-duration=10m #keep only 10 minutes of data in the in-memory block
storage.tsdb.retention=72h #retain 72 hours of data
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "5"
labels:
name: prometheus
name: prometheus
namespace: infra
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 7
selector:
matchLabels:
app: prometheus
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app: prometheus
spec:
nodeName: hdss7-21.host.com #pin the pod to this node, keeping it away from the memory-hungry Jenkins
containers:
- name: prometheus
image: harbor.od.com/infra/prometheus:v2.14.0
imagePullPolicy: IfNotPresent
command:
- /bin/prometheus #command that starts Prometheus
args:
- --config.file=/data/etc/prometheus.yml #configuration file
- --storage.tsdb.path=/data/prom-db #data directory inside the container
- --storage.tsdb.min-block-duration=10m #keep only 10 minutes of data in memory (VM test setting)
- --storage.tsdb.retention=72h #how long to keep data (test environment)
- --web.enable-lifecycle #enable remote hot-reloading of the configuration
ports:
- containerPort: 9090
protocol: TCP
volumeMounts:
- mountPath: /data #mount point inside the container
name: data
resources: #one way to constrain the container's resources
requests: #requested amount
cpu: "1000m" #1000m = 1000 millicores = 1 core
memory: "1.5Gi"
limits: #hard upper bound
cpu: "2000m"
memory: "3Gi"
imagePullSecrets:
- name: harbor
securityContext:
runAsUser: 0
serviceAccountName: prometheus
volumes:
- name: data
nfs:
server: hdss7-200
path: /data/nfs-volume/prometheus #host directory backing the container mount above
Resource manifest: Service
vi svc.yaml
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: infra
spec:
ports:
- port: 9090
protocol: TCP
targetPort: 9090
selector:
app: prometheus
Resource manifest: Ingress
vi ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: traefik
name: prometheus
namespace: infra
spec:
rules:
- host: prometheus.od.com
http:
paths:
- path: /
backend:
serviceName: prometheus
servicePort: 9090
Copy the certificates
On host 200:
Create the required directories (the ones Prometheus mounts):
# mkdir -p /data/nfs-volume/prometheus/{etc,prom-db}
Copy the certificates referenced by the configuration file:
# cd /data/nfs-volume/prometheus/etc/
# cp /opt/certs/ca.pem ./
# cp /opt/certs/client.pem ./
# cp /opt/certs/client-key.pem ./
Prepare the Prometheus configuration file
On the ops host hdss7-200.host.com:
Edit the Prometheus configuration file (don't ask why it is written this way; just take it as given for now):
# vi /data/nfs-volume/prometheus/etc/prometheus.yml
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: 'etcd'
tls_config:
ca_file: /data/etc/ca.pem
cert_file: /data/etc/client.pem
key_file: /data/etc/client-key.pem
scheme: https
static_configs:
- targets:
- '10.4.7.12:2379'
- '10.4.7.21:2379'
- '10.4.7.22:2379'
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: default;kubernetes;https
- job_name: 'kubernetes-pods'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
- job_name: 'kubernetes-kubelet'
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __address__
replacement: ${1}:10255
- job_name: 'kubernetes-cadvisor'
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __address__
replacement: ${1}:4194
- job_name: 'kubernetes-kube-state'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
- source_labels: [__meta_kubernetes_pod_label_grafanak8sapp]
regex: .*true.*
action: keep
- source_labels: ['__meta_kubernetes_pod_label_daemon', '__meta_kubernetes_pod_node_name']
regex: 'node-exporter;(.*)'
action: replace
target_label: nodename
- job_name: 'blackbox_http_pod_probe'
metrics_path: /probe
kubernetes_sd_configs:
- role: pod
params:
module: [http_2xx]
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_blackbox_scheme]
action: keep
regex: http
- source_labels: [__address__, __meta_kubernetes_pod_annotation_blackbox_port, __meta_kubernetes_pod_annotation_blackbox_path]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+);(.+)
replacement: $1:$2$3
target_label: __param_target
- action: replace
target_label: __address__
replacement: blackbox-exporter.kube-system:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
- job_name: 'blackbox_tcp_pod_probe'
metrics_path: /probe
kubernetes_sd_configs:
- role: pod
params:
module: [tcp_connect]
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_blackbox_scheme]
action: keep
regex: tcp
- source_labels: [__address__, __meta_kubernetes_pod_annotation_blackbox_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
target_label: __param_target
- action: replace
target_label: __address__
replacement: blackbox-exporter.kube-system:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
- job_name: 'traefik'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme]
action: keep
regex: traefik
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
Add the DNS record:
[root@hdss7-11 ~]# vi /var/named/od.com.zone
prometheus A 10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-11 ~]# dig -t A prometheus.od.com @10.4.7.11 +short
10.4.7.10
Apply the manifests
# kubectl apply -f http://k8s-yaml.od.com/prometheus/rbac.yaml
# kubectl apply -f http://k8s-yaml.od.com/prometheus/dp.yaml
# kubectl apply -f http://k8s-yaml.od.com/prometheus/svc.yaml
# kubectl apply -f http://k8s-yaml.od.com/prometheus/ingress.yaml
Check:
[root@hdss7-21 ~]# kubectl logs prometheus-7f656dbdcd-svm76 -n infra
Verify in a browser: prometheus.od.com
Click Status → Targets: what is listed are the job_names configured in prometheus.yml, and these targets cover most of our data-collection needs.
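The same list can be checked from the shell through the HTTP API, for example:
[root@hdss7-21 ~]# curl -s http://prometheus.od.com/api/v1/targets | grep -o '"health":"[a-z]*"' | sort | uniq -c
# every scraped target should eventually report "health":"up"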
Configure Prometheus
Configuration
scrape_interval decides how often (in seconds) Prometheus scrapes each exporter for cluster data.
Relabel action keep: keep only the targets that match.
Relabel action drop: the opposite, dropping the targets that match.
How the actions are used
They decide which pods are picked up by Prometheus's scrape jobs; see the sketch below.
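A sketch of the two actions as they appear in relabel_configs (the keep rule mirrors the kubernetes-pods job in prometheus.yml above; the drop rule and its label value are hypothetical):
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
  action: keep            # keep only pods whose prometheus_io_scrape annotation is "true"
  regex: true
- source_labels: [__meta_kubernetes_pod_label_app]
  action: drop            # drop pods whose app label matches the regex
  regex: test-.*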
Matching Traefik for monitoring
Edit the Traefik YAML:
In the dashboard, open the Traefik YAML and add annotations at the same level as labels:
"annotations": {
"prometheus_io_scheme": "traefik",
"prometheus_io_path": "/metrics",
"prometheus_io_port": "8080"
}
Note that a comma is required before this block (the dashboard editor shows JSON).
The annotations go under template → metadata → annotations; a YAML sketch of the placement follows.
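In YAML terms the placement looks roughly like this (only the annotations block is new; everything else in the Traefik DaemonSet stays as it is):
spec:
  template:
    metadata:
      annotations:                      # same level as labels
        prometheus_io_scheme: "traefik"
        prometheus_io_path: "/metrics"
        prometheus_io_port: "8080"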
After the pods have restarted, check Prometheus again.
[root@hdss7-21 ~]# kubectl delete pod traefik-ingress-9jcr9 -n kube-system
[root@hdss7-21 ~]# kubectl delete pod traefik-ingress-wb7cs -n kube-system
The targets are discovered automatically.
Monitoring dubbo-service
#it can be probed now; once the pod restarts it shows up in monitoring
The endpoint address uses the Service name.
Blackbox sits between the pods and Prometheus as a middleman: Prometheus pulls the probe results from blackbox.
blackbox:
It checks the liveness of the service inside a container, i.e. port health checks, in two flavours: TCP and HTTP.
First prepare the two services: switch dubbo-demo-service and dubbo-demo-consumer to the master images that do not depend on Apollo (to save resources).
Once both services are up, first add a TCP annotation to the dubbo-demo-service resource:
"annotations": {
"blackbox_port": "20880",
"blackbox_scheme": "tcp"
}
This auto-discovers the service in our workloads listening on TCP port 20880 and monitors its state.
Monitoring dubbo-demo-consumer
Next add an HTTP annotation to the dubbo-demo-consumer resource:
"annotations": {
"blackbox_path": "/hello?name=health",
"blackbox_port": "8080",
"blackbox_scheme": "http"
}
#make sure the health-check path is written correctly
Then check blackbox.od.com.
Add the annotations for JVM metrics:
"annotations": {
"prometheus_io_scrape": "true",
"prometheus_io_port": "12346",
"prometheus_io_path": "/"
}
Add them to both dubbo-demo-service and dubbo-demo-consumer.
The matching rules are defined in prometheus.yml.
4. Deploy the monitoring dashboard platform Grafana
Pull and push the image:
# docker pull grafana/grafana:5.4.2
# docker tag 6f18ddf9e552 harbor.od.com/infra/grafana:v5.4.2
# docker push harbor.od.com/infra/grafana:v5.4.2
Prepare the resource manifests:
Create the directories
# mkdir /data/nfs-volume/grafana
cd /data/k8s-yaml/grafana
1. rbac.yaml
vi rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
name: grafana
rules:
- apiGroups:
- "*"
resources:
- namespaces
- deployments
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/cluster-service: "true"
name: grafana
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: grafana
subjects:
- kind: User
name: k8s-node
2. dp.yaml
vi dp.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
app: grafana
name: grafana
name: grafana
namespace: infra
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 7
selector:
matchLabels:
name: grafana
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app: grafana
name: grafana
spec:
containers:
- name: grafana
image: harbor.od.com/infra/grafana:v5.4.2
imagePullPolicy: IfNotPresent
ports:
- containerPort: 3000
protocol: TCP
volumeMounts:
- mountPath: /var/lib/grafana
name: data
imagePullSecrets:
- name: harbor
securityContext:
runAsUser: 0
volumes:
- nfs:
server: hdss7-200
path: /data/nfs-volume/grafana
name: data
3. svc.yaml
vi svc.yaml
apiVersion: v1
kind: Service
metadata:
name: grafana
namespace: infra
spec:
ports:
- port: 3000
protocol: TCP
targetPort: 3000
selector:
app: grafana
4. ingress.yaml
vi ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: grafana
namespace: infra
spec:
rules:
- host: grafana.od.com
http:
paths:
- path: /
backend:
serviceName: grafana
servicePort: 3000
DNS record:
[root@hdss7-11 ~]# vi /var/named/od.com.zone
grafana A 10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
Apply the manifests:
# kubectl apply -f http://k8s-yaml.od.com/grafana/rbac.yaml
# kubectl apply -f http://k8s-yaml.od.com/grafana/dp.yaml
# kubectl apply -f http://k8s-yaml.od.com/grafana/svc.yaml
# kubectl apply -f http://k8s-yaml.od.com/grafana/ingress.yaml
5. Grafana configuration and plugin dashboards
Settings
Install plugins
Enter the container and install the plugins:
Installation is a little slow.
# kubectl exec -it grafana-d6588db94-xr4s6 /bin/bash -n infra
grafana-cli plugins install grafana-kubernetes-app
grafana-cli plugins install grafana-clock-panel
grafana-cli plugins install grafana-piechart-panel
grafana-cli plugins install briangann-gauge-panel
grafana-cli plugins install natel-discrete-panel
Delete the Grafana pod so that it restarts:
[root@hdss7-21 ~]# kubectl delete pod grafana-d6588db94-7c66l -n infra
Add the Prometheus data source
Add the data source
Prepare the certificates
[root@hdss7-200 certs]# cat /opt/certs/ca.pem
[root@hdss7-200 certs]# cat /opt/certs/client.pem
[root@hdss7-200 certs]# cat /opt/certs/client-key.pem
Configure the Kubernetes plugin
Kubernetes plugin settings
Port 7443 is proxied to the apiserver port 6443 on nodes 21 and 22.
After adding the cluster, wait a few minutes; until data arrives it reports "http forbidden", which is fine and clears up on its own within roughly 2-5 minutes.
If the Container panel shows no data, fix the query by grouping on container_label_io_kubernetes_pod_name instead of pod_name:
sum(container_memory_usage_bytes{container_label_io_kubernetes_pod_name=~"$pod"}) by (pod_name)
sum(container_memory_usage_bytes{container_label_io_kubernetes_pod_name=~"$pod"}) by (container_label_io_kubernetes_pod_name)
Delete and recreate grafana-demo
Load the other dashboards
Monitoring the dubbo JVM
Once the annotations are added, the targets are pulled into monitoring automatically, and the Grafana dashboards query Prometheus for the data they display.
6. How microservice containers plug into container-cloud monitoring
Covered as part of chapter 5; not broken out separately.
7. Alerting with the Alertmanager component
Set up the alerting component:
# docker pull docker.io/prom/alertmanager:v0.14.0
# docker tag 23744b2d645c harbor.od.com/infra/alertmanager:v0.14.0
# docker push harbor.od.com/infra/alertmanager:v0.14.0
Resource manifests:
mkdir /data/k8s-yaml/alertmanager
1. cm.yaml
vi cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: alertmanager-config
namespace: infra
data:
config.yml: |-
global:
# time to wait without further firing before an alert is declared resolved
resolve_timeout: 5m
# e-mail sending settings
smtp_smarthost: 'smtp.qq.com'
smtp_from: '731616192@qq.com'
smtp_auth_username: '731616192@qq.com'
smtp_auth_password: 'bdieyxrflckobcag'
smtp_require_tls: false
# root route for all incoming alerts; sets the dispatch policy
route:
# 這里的標簽列表是接收到報警信息后的重新分組標簽,例如,接收到的報警信息里面有許多具有 cluster=A 和 alertname=LatncyHigh 這樣的標簽的報警信息將會批量被聚合到一個分組里面
group_by: ['alertname', 'cluster']
# when a new alert group is created, wait at least group_wait before the first notification, so that several alerts for the same group can be sent together
group_wait: 30s
# after the first notification has been sent, wait group_interval before notifying about new alerts added to the group
group_interval: 5m
# if an alert has already been sent successfully, wait repeat_interval before sending it again
repeat_interval: 5m
# default receiver: alerts not matched by any route are sent here
receiver: default
receivers:
- name: 'default'
email_configs:
- to: '731616192@qq.com'
send_resolved: true
2. dp.yaml
vi dp.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: alertmanager
namespace: infra
spec:
replicas: 1
selector:
matchLabels:
app: alertmanager
template:
metadata:
labels:
app: alertmanager
spec:
containers:
- name: alertmanager
image: harbor.od.com/infra/alertmanager:v0.14.0
args:
- "--config.file=/etc/alertmanager/config.yml"
- "--storage.path=/alertmanager"
ports:
- name: alertmanager
containerPort: 9093
volumeMounts:
- name: alertmanager-cm
mountPath: /etc/alertmanager
volumes:
- name: alertmanager-cm
configMap:
name: alertmanager-config
imagePullSecrets:
- name: harbor
3. svc.yaml
vi svc.yaml
apiVersion: v1
kind: Service
metadata:
name: alertmanager
namespace: infra
spec:
selector:
app: alertmanager
ports:
- port: 80
targetPort: 9093
Apply the manifests
kubectl apply -f http://k8s-yaml.od.com/alertmanager/cm.yaml
kubectl apply -f http://k8s-yaml.od.com/alertmanager/dp.yaml
kubectl apply -f http://k8s-yaml.od.com/alertmanager/svc.yaml
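A quick sanity check that Alertmanager is reachable (the pod name and ClusterIP are whatever your cluster assigns):
[root@hdss7-21 ~]# kubectl get pods -n infra | grep alertmanager
[root@hdss7-21 ~]# kubectl get svc -n infra alertmanager               # note the ClusterIP
[root@hdss7-21 ~]# curl -s -o /dev/null -w "%{http_code}\n" http://<CLUSTER-IP>/   # substitute the ClusterIP; expect 200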
Configure the basic alerting rules:
vi /data/nfs-volume/prometheus/etc/rules.yml
groups:
- name: hostStatsAlert
rules:
- alert: hostCpuUsageAlert
expr: sum(avg without (cpu)(irate(node_cpu{mode!='idle'}[5m]))) by (instance) > 0.85
for: 5m
labels:
severity: warning
annotations:
summary: "{{ $labels.instance }} CPU usage above 85% (current value: {{ $value }}%)"
- alert: hostMemUsageAlert
expr: (node_memory_MemTotal - node_memory_MemAvailable)/node_memory_MemTotal > 0.85
for: 5m
labels:
severity: warning
annotations:
summary: "{{ $labels.instance }} MEM usage above 85% (current value: {{ $value }}%)"
- alert: OutOfInodes
expr: node_filesystem_free{fstype="overlay",mountpoint ="/"} / node_filesystem_size{fstype="overlay",mountpoint ="/"} * 100 < 10
for: 5m
labels:
severity: warning
annotations:
summary: "Out of inodes (instance {{ $labels.instance }})"
description: "Disk is almost running out of available inodes (< 10% left) (current value: {{ $value }})"
- alert: OutOfDiskSpace
expr: node_filesystem_free{fstype="overlay",mountpoint ="/rootfs"} / node_filesystem_size{fstype="overlay",mountpoint ="/rootfs"} * 100 < 10
for: 5m
labels:
severity: warning
annotations:
summary: "Out of disk space (instance {{ $labels.instance }})"
description: "Disk is almost full (< 10% left) (current value: {{ $value }})"
- alert: UnusualNetworkThroughputIn
expr: sum by (instance) (irate(node_network_receive_bytes[2m])) / 1024 / 1024 > 100
for: 5m
labels:
severity: warning
annotations:
summary: "Unusual network throughput in (instance {{ $labels.instance }})"
description: "Host network interfaces are probably receiving too much data (> 100 MB/s) (current value: {{ $value }})"
- alert: UnusualNetworkThroughputOut
expr: sum by (instance) (irate(node_network_transmit_bytes[2m])) / 1024 / 1024 > 100
for: 5m
labels:
severity: warning
annotations:
summary: "Unusual network throughput out (instance {{ $labels.instance }})"
description: "Host network interfaces are probably sending too much data (> 100 MB/s) (current value: {{ $value }})"
- alert: UnusualDiskReadRate
expr: sum by (instance) (irate(node_disk_bytes_read[2m])) / 1024 / 1024 > 50
for: 5m
labels:
severity: warning
annotations:
summary: "Unusual disk read rate (instance {{ $labels.instance }})"
description: "Disk is probably reading too much data (> 50 MB/s) (current value: {{ $value }})"
- alert: UnusualDiskWriteRate
expr: sum by (instance) (irate(node_disk_bytes_written[2m])) / 1024 / 1024 > 50
for: 5m
labels:
severity: warning
annotations:
summary: "Unusual disk write rate (instance {{ $labels.instance }})"
description: "Disk is probably writing too much data (> 50 MB/s) (current value: {{ $value }})"
- alert: UnusualDiskReadLatency
expr: rate(node_disk_read_time_ms[1m]) / rate(node_disk_reads_completed[1m]) > 100
for: 5m
labels:
severity: warning
annotations:
summary: "Unusual disk read latency (instance {{ $labels.instance }})"
description: "Disk latency is growing (read operations > 100ms) (current value: {{ $value }})"
- alert: UnusualDiskWriteLatency
expr: rate(node_disk_write_time_ms[1m]) / rate(node_disk_writes_completed[1m]) > 100
for: 5m
labels:
severity: warning
annotations:
summary: "Unusual disk write latency (instance {{ $labels.instance }})"
description: "Disk latency is growing (write operations > 100ms) (current value: {{ $value }})"
- name: http_status
rules:
- alert: ProbeFailed
expr: probe_success == 0
for: 1m
labels:
severity: error
annotations:
summary: "Probe failed (instance {{ $labels.instance }})"
description: "Probe failed (current value: {{ $value }})"
- alert: StatusCode
expr: probe_http_status_code <= 199 OR probe_http_status_code >= 400
for: 1m
labels:
severity: error
annotations:
summary: "Status Code (instance {{ $labels.instance }})"
description: "HTTP status code is not 200-399 (current value: {{ $value }})"
- alert: SslCertificateWillExpireSoon
expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 30
for: 5m
labels:
severity: warning
annotations:
summary: "SSL certificate will expire soon (instance {{ $labels.instance }})"
description: "SSL certificate expires in 30 days (current value: {{ $value }})"
- alert: SslCertificateHasExpired
expr: probe_ssl_earliest_cert_expiry - time() <= 0
for: 5m
labels:
severity: error
annotations:
summary: "SSL certificate has expired (instance {{ $labels.instance }})"
description: "SSL certificate has expired already (current value: {{ $value }})"
- alert: BlackboxSlowPing
expr: probe_icmp_duration_seconds > 2
for: 5m
labels:
severity: warning
annotations:
summary: "Blackbox slow ping (instance {{ $labels.instance }})"
description: "Blackbox ping took more than 2s (current value: {{ $value }})"
- alert: BlackboxSlowRequests
expr: probe_http_duration_seconds > 2
for: 5m
labels:
severity: warning
annotations:
summary: "Blackbox slow requests (instance {{ $labels.instance }})"
description: "Blackbox request took more than 2s (current value: {{ $value }})"
- alert: PodCpuUsagePercent
expr: sum(sum(label_replace(irate(container_cpu_usage_seconds_total[1m]),"pod","$1","container_label_io_kubernetes_pod_name", "(.*)"))by(pod) / on(pod) group_right kube_pod_container_resource_limits_cpu_cores *100 )by(container,namespace,node,pod,severity) > 80
for: 5m
labels:
severity: warning
annotations:
summary: "Pod cpu usage percent has exceeded 80% (current value: {{ $value }}%)"
Add the following to prometheus.yml:
[root@hdss7-200 alertmanager]# vi /data/nfs-volume/prometheus/etc/prometheus.yml
alerting:
alertmanagers:
- static_configs:
- targets: ["alertmanager"]
rule_files:
- "/data/etc/rules.yml"
Gracefully reload Prometheus
#reload Prometheus after changing its configuration
#on node 21 (the node where Prometheus runs)
[root@hdss7-21 ~]# ps aux|grep prometheus
[root@hdss7-21 ~]# kill -SIGHUP 3441 #Prometheus reloads its configuration on SIGHUP
Or reload the configuration over HTTP:
# curl -X POST http://prometheus.od.com/-/reload
The rules above are our alerting rules.
Test an alert:
Stop dubbo-demo-service in the app namespace:
Check the blackbox page:
Check the Alerts page:
Once an alert turns red (firing), the e-mail notification is sent:
The alert has been received; when this moves to production, adding WeChat, DingTalk, and SMS alerting will be covered as well.
To customize alert rules and alert content, study PromQL a little and edit the rule file yourself; a minimal sketch follows.
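As a starting point, a minimal custom-rule sketch (the rule name and threshold are made up; kube_pod_container_status_restarts_total comes from kube-state-metrics). Append it to rules.yml and reload Prometheus as shown above:
- name: custom
  rules:
  - alert: PodRestartLooping                  # hypothetical rule name
    expr: rate(kube_pod_container_status_restarts_total[15m]) * 900 > 3   # more than 3 restarts within 15 minutes
    for: 10m
    labels:
      severity: warning
    annotations:
      summary: "{{ $labels.namespace }}/{{ $labels.pod }} is restarting too often (current value: {{ $value }})"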
8. Course wrap-up
How the pieces fit together
Exporters
Four kinds were deployed, each serving a different purpose.
Prometheus server
Retrieval (the data collector) scrapes the exporters and stores the samples in the TSDB (time-series database).
Configuration: static configs, dynamic service discovery, and file-based service discovery.
HTTP server: the web UI that Prometheus provides.