1.1: Install and configure etcd
1. Stop the etcd service on the master node, then install and configure etcd on node2:
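The etcd install itself isn't shown in the original; on CentOS it would typically be:
[root@k8s-master02 ~]# yum install etcd -y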
[root@k8s-master02 ~]# grep -v '^#' /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="default"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.13:2379"
[root@k8s-master02 ~]# systemctl start etcd
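As a quick sanity check that etcd is serving on the new endpoint (assumed command, using the v2 etcdctl client that ships with the CentOS etcd package):
[root@k8s-master02 ~]# etcdctl --endpoints=http://10.0.0.13:2379 cluster-health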
1.2 Install and configure api-server, controller-manager, and scheduler on master01 (127.0.0.1:8080)
1. Point the apiserver at etcd on node2
[root@k8s-master01 ~]# vim /etc/kubernetes/apiserver +17
KUBE_ETCD_SERVERS="--etcd-servers=http://10.0.0.13:2379"
[root@k8s-master01 ~]# vim /etc/kubernetes/config +22
KUBE_MASTER="--master=http://127.0.0.1:8080"
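The restart on master01 isn't shown in the original, but the components must be restarted to pick up the new etcd endpoint (same services as restarted on master02 below):
[root@k8s-master01 ~]# systemctl restart kube-apiserver.service kube-controller-manager.service kube-scheduler.service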
1.3 Install and configure api-server, controller-manager, and scheduler on master02 (127.0.0.1:8080)
[root@k8s-master02 ~]# yum install kubernetes-master
[root@k8s-master02 ~]# scp -rp 10.0.0.11:/etc/kubernetes/apiserver /etc/kubernetes/apiserver
[root@k8s-master02 ~]# vim /etc/kubernetes/config
KUBE_MASTER="--master=http://127.0.0.1:8080"
[root@k8s-master02 ~]# systemctl restart kube-apiserver.service kube-controller-manager.service kube-scheduler.service
[root@k8s-master02 ~]# systemctl enable kube-apiserver.service kube-controller-manager.service kube-scheduler.service
[root@k8s-master02 ~]# kubectl get componentstatus
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}
controller-manager   Healthy   ok
Test whether the two master nodes share data.
Create a Deployment from master1 (a sketch of the manifest used is shown below):
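The original doesn't include k8s_deploy.yml; a minimal manifest consistent with the output below (3 replicas of 10.0.0.11:5000/nginx:1.13 from the local registry, label app=nginx, extensions/v1beta1 as used by this k8s version) might look like:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 10.0.0.11:5000/nginx:1.13
        ports:
        - containerPort: 80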
[root@k8s-master01 deploy]# kubectl create -f k8s_deploy.yml
deployment "nginx-deployment" created
[root@k8s-master01 deploy]# kubectl get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           5s

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   7m

NAME                             DESIRED   CURRENT   READY   AGE
rs/nginx-deployment-2807576163   3         3         3       5s

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          5s
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          5s
po/nginx-deployment-2807576163-rmnp1   1/1       Running   0          5s
[root@k8s-master01 deploy]# kubectl get node
NAME        STATUS    AGE
10.0.0.12   Ready     8m
10.0.0.13   Ready     7m
=============================================================================
The same resources can be seen from the master2 node:
[root@k8s-master02 ~]# kubectl get all
NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   7m
[root@k8s-master02 ~]# kubectl get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           49s

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   8m

NAME                             DESIRED   CURRENT   READY   AGE
rs/nginx-deployment-2807576163   3         3         3       49s

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          49s
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          49s
po/nginx-deployment-2807576163-rmnp1   1/1       Running   0          49s
1.4 Install and configure Keepalived on master01 and master02
1. Install keepalived
[root@k8s-master01 ~]# yum install keepalived.x86_64 -y
[root@k8s-master02 ~]# yum install keepalived.x86_64 -y
2. Configure keepalived
[root@k8s-master01 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    router_id master
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 50
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.3
    }
}
[root@k8s-master02 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
    router_id backup
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 50
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.3
    }
}
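Starting the service isn't shown in the original; keepalived has to be started (and enabled) on both masters before the VIP comes up:
[root@k8s-master01 ~]# systemctl start keepalived && systemctl enable keepalived
[root@k8s-master02 ~]# systemctl start keepalived && systemctl enable keepalived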
3. Test the VIP
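Since master01 has the higher priority (150 vs 100), the VIP should sit on it; one way to verify (command assumed, not in the original):
[root@k8s-master01 ~]# ip addr show eth0 | grep 10.0.0.3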
1.5: Point kubelet and kube-proxy on all nodes at the api-server VIP
1. On the nodes, point kubelet at the VIP
[root@k8s-node-1 ~]# vim /etc/kubernetes/kubelet
KUBELET_API_SERVER="--api-servers=http://10.0.0.3:8080"
[root@k8s-node-2 ~]# vim /etc/kubernetes/kubelet
KUBELET_API_SERVER="--api-servers=http://10.0.0.3:8080"
2. On the nodes, point kube-proxy at the VIP
[root@k8s-node-1 ~]# vim /etc/kubernetes/config
KUBE_MASTER="--master=http://10.0.0.3:8080"
[root@k8s-node-2 ~]# vim /etc/kubernetes/config
KUBE_MASTER="--master=http://10.0.0.3:8080"
[root@k8s-node-1 ~]# systemctl restart kubelet.service kube-proxy.service
[root@k8s-node-2 ~]# systemctl restart kubelet.service kube-proxy.service
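A quick check (assumed, not in the original) that the nodes can reach the apiserver through the VIP; this only works if the apiserver binds its insecure port on all interfaces, which the node configuration above already requires:
[root@k8s-node-1 ~]# curl http://10.0.0.3:8080/healthz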
1.6: Test high availability
1. For a live failover test, stop the keepalived and apiserver services on master01 (or master02)
[root@k8s-master01 ~]# systemctl stop keepalived.service
[root@k8s-master01 ~]# systemctl stop kube-apiserver.service
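With keepalived stopped on master01, the VIP should fail over to master02 within a few advert intervals; a way to confirm (assumed command):
[root@k8s-master02 ~]# ip addr show eth0 | grep 10.0.0.3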
2. master02 can still retrieve all the data
[root@k8s-master02 ~]# kubectl get all -o wide
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           22m

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE   SELECTOR
svc/kubernetes   10.254.0.1   <none>        443/TCP   30m   <none>

NAME                             DESIRED   CURRENT   READY   AGE   CONTAINER(S)   IMAGE(S)                    SELECTOR
rs/nginx-deployment-2807576163   3         3         3       22m   nginx          10.0.0.11:5000/nginx:1.13   app=nginx,pod-template-hash=2807576163

NAME                                   READY     STATUS    RESTARTS   AGE   IP            NODE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          22m   172.18.55.2   10.0.0.13
po/nginx-deployment-2807576163-kdltg   1/1       Running   0          18s   172.18.55.4   10.0.0.13
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          22m   172.18.55.3   10.0.0.13
3. From a node, test remote access through the VIP
[root@k8s-node-2 ~]# kubectl -s 10.0.0.3:8080 get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           23m

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   31m

NAME                             DESIRED   CURRENT   READY   AGE
rs/nginx-deployment-2807576163   3         3         3       23m

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          23m
po/nginx-deployment-2807576163-kdltg   1/1       Running   0          1m
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          23m
4. From a node, remotely expose the pod port
[root@k8s-node-2 ~]# kubectl -s 10.0.0.3:8080 expose deploy/nginx-deployment --port=80 --target-port=80 --type=NodePort
service "nginx-deployment" exposed
[root@k8s-node-2 ~]# kubectl -s 10.0.0.3:8080 get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           27m

NAME                   CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
svc/kubernetes         10.254.0.1     <none>        443/TCP        35m
svc/nginx-deployment   10.254.48.48   <nodes>       80:48846/TCP   14s

NAME                             DESIRED   CURRENT   READY   AGE
rs/nginx-deployment-2807576163   3         3         3       27m

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          27m
po/nginx-deployment-2807576163-kdltg   1/1       Running   0          5m
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          27m
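To confirm the NodePort actually serves traffic, an assumed check using the 48846 port from the output above and either node's IP:
[root@k8s-node-2 ~]# curl -I http://10.0.0.12:48846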
This brings the k8s HA cluster build close to the finish line, though plenty of rough edges remain and improvements are welcome. etcd here is a single node and should be promoted to an etcd cluster, and keepalived should be paired with a script that probes the service state (see the sketch below): when a master's service dies, the VIP immediately floats to the backup node, and when the master recovers, the VIP floats back, keeping the business running 7x24.
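As a starting point for that health check, here is a sketch (assumed, not part of the original setup) of a keepalived vrrp_script for master01: it probes the apiserver's insecure port every 3 seconds and, on failure, lowers the effective priority from 150 to 90, below the backup's 100, so the VIP fails over; once the apiserver recovers, the priority is restored and the VIP preempts back.
vrrp_script check_apiserver {
    # fail when the local apiserver's /healthz stops answering
    script "/usr/bin/curl -sf http://127.0.0.1:8080/healthz"
    interval 3
    weight -60
}
vrrp_instance VI_1 {
    # ... existing settings from section 1.4 stay the same ...
    track_script {
        check_apiserver
    }
}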