1. K8s Core Resource Management Methods
1.1. Imperative resource management
1.1.1. Managing namespace resources
1.1.1.1. View namespaces
[root@hdss7-21 ~]# kubectl get namespace
NAME STATUS AGE
default Active 6d12h
kube-node-lease Active 6d12h
kube-public Active 6d12h
kube-system Active 6d12h
[root@hdss7-21 ~]# kubectl get ns
NAME STATUS AGE
default Active 6d12h
kube-node-lease Active 6d12h
kube-public Active 6d12h
kube-system Active 6d12h
1.1.1.2. View the resources inside a namespace
[root@hdss7-21 ~]# kubectl get all -n default
//Queries all resources in the default namespace; a plain kubectl get all is equivalent to adding -n default
NAME READY STATUS RESTARTS AGE //pod resources
pod/nginx-ds-mcvxt 1/1 Running 1 6d13h
pod/nginx-ds-zsnz9 1/1 Running 1 6d13h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE //service resources
service/kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 6d12h
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE //pod controller resources
daemonset.apps/nginx-ds 2 2 2 2 2 <none> 6d5h
1.1.1.3. Create a namespace
[root@hdss7-21 ~]# kubectl create namespace app //namespace can be abbreviated to ns
namespace/app created
[root@hdss7-21 ~]# kubectl get namespace
NAME STATUS AGE
app Active 13s
default Active 6d13h
kube-node-lease Active 6d13h
kube-public Active 6d13h
kube-system Active 6d13h
1.1.1.4. Delete a namespace
[root@hdss7-21 ~]# kubectl delete namespace app
namespace "app" deleted
[root@hdss7-21 ~]# kubectl get namespace
NAME STATUS AGE
default Active 6d13h
kube-node-lease Active 6d13h
kube-public Active 6d13h
kube-system Active 6d13h
1.1.2. Managing Deployment resources
1.1.2.1. Create a Deployment
[root@hdss7-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 -n kube-public
deployment.apps/nginx-dp created
1.1.2.2. View the Deployment
Basic view
[root@hdss7-21 ~]# kubectl get deployment -n kube-public
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-dp 1/1 1 1 64s
Extended view (the pod it created)
[root@hdss7-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-v96vj 1/1 Running 0 90s
Detailed description
[root@hdss7-21 ~]# kubectl describe deployment nginx-dp -n kube-public
Name: nginx-dp
Namespace: kube-public
CreationTimestamp: Sat, 23 Nov 2019 15:48:18 +0800
Labels: app=nginx-dp
Annotations: deployment.kubernetes.io/revision: 1 //annotations
Selector: app=nginx-dp
Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable
//one replica desired
StrategyType: RollingUpdate //update strategy; rolling update by default
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: app=nginx-dp
Containers:
nginx:
Image: harbor.od.com/public/nginx:v1.7.9
Port: <none>
Host Port: <none>
Environment: <none>
Mounts: <none>
Volumes: <none>
Conditions:
Type Status Reason
---- ------ ------
Available True MinimumReplicasAvailable
Progressing True NewReplicaSetAvailable
OldReplicaSets: <none>
NewReplicaSet: nginx-dp-5dfc689474 (1/1 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 3h deployment-controller Scaled up replica set nginx-dp-5dfc689474 to 1
1.1.2.3. View the pod resources
[root@hdss7-21 ~]# kubectl get pods -n kube-public -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-dp-5dfc689474-v96vj 1/1 Running 0 174m 172.7.21.3 hdss7-21.host.com <none> <none>
1.1.2.4. Exec into the pod
[root@hdss7-21 ~]# kubectl exec -ti nginx-dp-5dfc689474-v96vj /bin/bash -n kube-public
root@nginx-dp-5dfc689474-v96vj:/# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
link/ether 02:42:ac:07:15:03 brd ff:ff:ff:ff:ff:ff
inet 172.7.21.3/24 brd 172.7.21.255 scope global eth0
valid_lft forever preferred_lft forever
Note: docker exec also works, but only on the host where the pod is actually running.
1.1.2.5. Delete the pod (effectively a restart)
[root@hdss7-21 ~]# watch -n 1 'kubectl describe deployment nginx-dp -n kube-public|grep -C 5 Event'
[root@hdss7-21 ~]# kubectl delete pod nginx-dp-5dfc689474-v96vj -n kube-public
pod "nginx-dp-5dfc689474-v96vj" deleted
[root@hdss7-21 ~]# kubectl get pods -n kube-public -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-dp-5dfc689474-ggxl5 1/1 Running 0 5m29s 172.7.22.3 hdss7-22.host.com <none> <none>
Use watch to observe the state changes while the pod is recreated.
Force-delete parameters: --force --grace-period=0
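For example, if a pod gets stuck in Terminating, it can be removed forcibly (a sketch; substitute the real pod name):
kubectl delete pod nginx-dp-5dfc689474-v96vj -n kube-public --force --grace-period=0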
1.1.2.6. Delete the deployment
[root@hdss7-21 ~]# kubectl delete deployment nginx-dp -n kube-public
deployment.extensions "nginx-dp" deleted
[root@hdss7-21 ~]# kubectl get all -n kube-public
No resources found.
1.1.3. Managing Service resources
1.1.3.1. Create a service
Recreate the deployment first:
[root@hdss7-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 -n kube-public
deployment.apps/nginx-dp created
[root@hdss7-21 ~]# kubectl get all -n kube-public
NAME READY STATUS RESTARTS AGE
pod/nginx-dp-5dfc689474-ggsn2 1/1 Running 0 17s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx-dp 1/1 1 1 17s
NAME DESIRED CURRENT READY AGE
replicaset.apps/nginx-dp-5dfc689474 1 1 1 17s
Create the service:
[root@hdss7-21 ~]# kubectl expose deployment nginx-dp --port=80 -n kube-public
service/nginx-dp exposed
1.1.3.2. View the service
[root@hdss7-21 ~]# kubectl get all -n kube-public //note the extra service that now appears
NAME READY STATUS RESTARTS AGE
pod/nginx-dp-5dfc689474-ggsn2 1/1 Running 0 112s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx-dp ClusterIP 192.168.95.151 <none> 80/TCP 24s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx-dp 1/1 1 1 112s
NAME DESIRED CURRENT READY AGE
replicaset.apps/nginx-dp-5dfc689474 1 1 1 112s
####### Since the flannel plugin is not installed yet, the ClusterIP can only be tested from the host running the pod ###########
[root@hdss7-21 ~]# kubectl get pod -n kube-public -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-dp-5dfc689474-ggsn2 1/1 Running 0 9m40s 172.7.22.3 hdss7-22.host.com <none> <none>
[root@hdss7-22 ~]# curl 192.168.95.151
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
[root@hdss7-22 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.1:443 nq
-> 10.4.7.21:6443 Masq 1 0 0
-> 10.4.7.22:6443 Masq 1 0 0
TCP 192.168.95.151:80 nq
-> 172.7.22.3:80 Masq 1 0 0
1.1.3.3. Describe the service
[root@hdss7-22 ~]# kubectl describe svc nginx-dp -n kube-public
Name: nginx-dp
Namespace: kube-public
Labels: app=nginx-dp
Annotations: <none>
Selector: app=nginx-dp
Type: ClusterIP
IP: 192.168.95.151
Port: <unset> 80/TCP
TargetPort: 80/TCP
Endpoints: 172.7.22.3:80
Session Affinity: None
Events: <none>
1.1.3.4. Scale the deployment and observe the LVS scheduling
[root@hdss7-22 ~]# kubectl scale deployment nginx-dp --replicas=2 -n kube-public
deployment.extensions/nginx-dp scaled
[root@hdss7-22 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.1:443 nq
-> 10.4.7.21:6443 Masq 1 0 0
-> 10.4.7.22:6443 Masq 1 0 0
TCP 192.168.95.151:80 nq
-> 172.7.21.3:80 Masq 1 0 0
-> 172.7.22.3:80 Masq 1 0 0
Note: 192.168.95.151 is a virtual ClusterIP generated in advance; it is only valid inside the cluster. hdss7-11, for example, cannot ping it, because there is no route to it and it cannot communicate with the outside. Later we will expose services externally with traefik.
1.1.4. Imperative usage summary
- The only entry point for managing k8s resources is the apiserver's API, called through the corresponding methods
- kubectl is the official CLI tool; it talks to the apiserver, translating what the user types on the command line into requests the apiserver understands, and thereby manages k8s resources
- kubectl command references:
* --help
* http://doce.kubernetes.org.cn
- Imperative management covers more than 90% of everyday resource-management needs, but its drawbacks are obvious:
* commands are long, complex and hard to remember
* in certain scenarios it cannot satisfy the management requirements
* creating, deleting and querying resources is easy, but modifying them is painful
1.2. Declarative Resource Management
1.2.1. View a resource configuration manifest
[root@hdss7-22 ~]# kubectl get pods nginx-dp-5dfc689474-hw6vm -o yaml -n kube-public
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: "2019-11-23T11:38:48Z"
generateName: nginx-dp-5dfc689474-
labels:
app: nginx-dp
pod-template-hash: 5dfc689474
name: nginx-dp-5dfc689474-hw6vm
namespace: kube-public
ownerReferences:
- apiVersion: apps/v1
blockOwnerDeletion: true
controller: true
kind: ReplicaSet
name: nginx-dp-5dfc689474
uid: e0f91f5a-a601-4728-b57e-77f82e2dcc5f
resourceVersion: "173324"
selfLink: /api/v1/namespaces/kube-public/pods/nginx-dp-5dfc689474-hw6vm
uid: 6ecd27e5-89cd-4803-bc9a-6c281c8e3f16
spec:
containers:
- image: harbor.od.com/public/nginx:v1.7.9
imagePullPolicy: IfNotPresent
name: nginx
[root@hdss7-22 ~]# kubectl get service -n kube-public
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-dp ClusterIP 192.168.95.151 <none> 80/TCP 50m
[root@hdss7-22 ~]# kubectl get service nginx-dp -o yaml -n kube-public
apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2019-11-23T11:25:55Z"
labels:
app: nginx-dp
name: nginx-dp
namespace: kube-public
resourceVersion: "172227"
selfLink: /api/v1/namespaces/kube-public/services/nginx-dp
uid: f6cc8c7f-50f1-4c75-8eac-8d4a20133af1
spec:
clusterIP: 192.168.95.151
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: nginx-dp
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
1.2.2. Explain resource manifests (apiVersion, kind, metadata, spec)
[root@hdss7-22 ~]# kubectl explain service.metadata
KIND: Service
VERSION: v1
RESOURCE: metadata <Object>
DESCRIPTION:
Standard object's metadata. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
ObjectMeta is metadata that all persisted resources must have, which
includes all objects users must create.
FIELDS:
annotations <map[string]string>
Annotations is an unstructured key value map stored with a resource that
may be set by external tools to store and retrieve arbitrary metadata. They
are not queryable and should be preserved when modifying objects. More
info: http://kubernetes.io/docs/user-guide/annotations
clusterName <string>
The name of the cluster which the object belongs to. This is used to
distinguish resources with same name and namespace in different clusters.
This field is not set anywhere right now and apiserver is going to ignore
it if set in create or update request.
1.2.3. Create a resource manifest
[root@hdss7-200 ~]# vi nginx-ds-svc.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: nginx-ds
name: nginx-ds
namespace: default
spec:
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: nginx-ds
sessionAffinity: None
type: ClusterIP
1.2.4. Apply the resource manifest
[root@hdss7-21 ~]# kubectl create -f nginx-ds-svc.yaml
service/nginx-ds created
View it (in the default namespace):
[root@hdss7-21 ~]# kubectl get svc -n default
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 6d17h
nginx-ds ClusterIP 192.168.189.230 <none> 80/TCP 110s
[root@hdss7-21 ~]# kubectl get svc nginx-ds -o yaml
apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2019-11-23T12:28:20Z"
labels:
app: nginx-ds
name: nginx-ds
namespace: default
resourceVersion: "177549"
selfLink: /api/v1/namespaces/default/services/nginx-ds
uid: 24add3a1-cf18-4c29-85fc-65e45f54edbb
spec:
clusterIP: 192.168.189.230
ports:
- port: 80
protocol: TCP
targetPort: 80
selector:
app: nginx-ds
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
1.2.5. Modify the manifest and re-apply
1.2.5.1. Online (in-place) edit
[root@hdss7-21 ~]# kubectl edit svc nginx-ds
Edit cancelled, no changes made. //then edit again, changing the port to 801
[root@hdss7-21 ~]# kubectl edit svc nginx-ds
service/nginx-ds edited
View:
[root@hdss7-21 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 6d18h
nginx-ds ClusterIP 192.168.189.230 <none> 801/TCP 11m
1.2.5.2. Offline edit
[root@hdss7-21 ~]# vi nginx-ds-svc.yaml
[root@hdss7-21 ~]# kubectl apply -f nginx-ds-svc.yaml
daemonset.extensions/nginx-ds configured
vim /opt/kubernetes/server/bin/kube-apiserver.sh
Note: the service port range must be specified in kube-apiserver.sh via --service-node-port-range (e.g. 10-29999). The range is not checked when the service resource is first created, but it is enforced on apply; the default used in this course is 3000-29999. After changing the flag, restart the apiserver.
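A sketch of where the flag would sit inside kube-apiserver.sh (only this flag is shown; the surrounding flags from the original script are assumed unchanged):
./kube-apiserver \
  --service-node-port-range 3000-29999 \
  ...
After editing, restart the apiserver on each master, e.g. supervisorctl restart kube-apiserver-7-21.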
1.2.6. Delete via resource manifest
1.2.6.1. Imperative delete
[root@hdss7-21 ~]# kubectl delete svc nginx-ds
service "nginx-ds" deleted
1.2.6.2. Declarative delete
[root@hdss7-21 ~]# kubectl delete -f nginx-ds-svc.yaml
service "nginx-ds" deleted
1.2.7. Declarative usage summary
- Declarative resource management relies on resource configuration manifest files to manage resources
- Resources are defined beforehand in manifests, which are then applied to the k8s cluster with the imperative -f commands
- Syntax: kubectl create/apply/delete -f /path/to/yaml
- When unsure about a field, use kubectl explain (see the example below)
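For example, to see which fields a Service port accepts:
kubectl explain service.spec.ports
kubectl explain can be chained field by field (service, service.spec, service.spec.ports, ...).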
2. K8s Core Add-ons
2.1. The K8s CNI network plugin - Flannel
2.1.1. Cluster plan
Hostname | Role | IP |
---|---|---|
hdss7-21.host.com | flannel | 10.4.7.21 |
hdss7-22.host.com | flannel | 10.4.7.22 |
Note: the deployment is shown on hdss7-21.host.com; the other compute node is set up the same way
2.1.2. Download the software, unpack it, create the symlink
[root@hdss7-21 ~]# cd /opt/src/
[root@hdss7-21 src]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@hdss7-21 src]# mkdir /opt/flannel-v0.11.0
[root@hdss7-21 src]# tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0/
[root@hdss7-21 src]# ln -s /opt/flannel-v0.11.0/ /opt/flannel
2.1.3. Final directory layout
[root@hdss7-21 flannel]# mkdir /opt/flannel/cert
[root@hdss7-21 flannel]# ll
total 34436
drwxr-xr-x 2 root root 6 Nov 23 21:35 cert
-rwxr-xr-x 1 root root 35249016 Jan 29 2019 flanneld
-rwxr-xr-x 1 root root 2139 Oct 23 2018 mk-docker-opts.sh
-rw-r--r-- 1 root root 4300 Oct 23 2018 README.md
2.1.4. Copy the certificates
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/ca.pem .
root@hdss7-200's password:
ca.pem 100% 1346 961.7KB/s 00:00
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client.pem .
root@hdss7-200's password:
client.pem 100% 1363 19.3KB/s 00:00
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client-key.pem .
root@hdss7-200's password:
client-key.pem
2.1.5. Create the configuration
[root@hdss7-21 flannel]# vi subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
Note: the SUBNET differs on each node; remember to change it.
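For reference, a sketch of what subnet.env would contain on hdss7-22 (only FLANNEL_SUBNET changes):
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.22.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false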
2.1.6. Create the startup script
[root@hdss7-21 flannel]# vi flanneld.sh
#!/bin/sh
./flanneld \
--public-ip=10.4.7.21 \
--etcd-endpoints=https://10.4.7.12:2379,https://10.4.7.21:2379,https://10.4.7.22:2379 \
--etcd-keyfile=./cert/client-key.pem \
--etcd-certfile=./cert/client.pem \
--etcd-cafile=./cert/ca.pem \
--iface=eth0 \
--subnet-file=./subnet.env \
--healthz-port=2401
Note: the public-ip differs on each node; remember to change it.
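On hdss7-22, for example, only the public IP flag changes (a sketch; the etcd endpoints, certificate paths, --iface and --subnet-file stay exactly as above):
--public-ip=10.4.7.22 \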
2.1.7. Check the configuration and permissions, create the log directory
[root@hdss7-21 flannel]# chmod +x flanneld.sh
[root@hdss7-21 flannel]# mkdir -p /data/logs/flanneld
2.1.8. Create the supervisor configuration
[root@hdss7-21 flannel]# vi /etc/supervisord.d/flannel.ini
[program:flanneld-7-21]
command=/opt/flannel/flanneld.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/flannel ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
Note: remember to change the [program:...] name on other nodes
2.1.9. Configure etcd: add the host-gw backend
[root@hdss7-21 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
Verify:
[root@hdss7-21 etcd]# ./etcdctl get /coreos.com/network/config
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
2.1.10. Start the service and check it
[root@hdss7-21 flannel]# supervisorctl update
flanneld-7-21: added process group
[root@hdss7-21 flannel]# supervisorctl status
flanneld-7-21 RUNNING pid 8173, uptime 0:01:49
[root@hdss7-21 flannel]# tail -fn 200 /data/logs/flanneld/flanneld.stdout.log
I1123 21:53:59.294735 8174 main.go:527] Using interface with name eth0 and address 10.4.7.21
I1123 21:53:59.294855 8174 main.go:540] Using 10.4.7.21 as external address
2019-11-23 21:53:59.295437 I | warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated
I1123 21:53:59.295497 8174 main.go:244] Created subnet manager: Etcd Local Manager with Previous Subnet: 172.7.21.0/24
I1123 21:53:59.295502 8174 main.go:247] Installing signal handlers
I1123 21:53:59.295794 8174 main.go:587] Start healthz server on 0.0.0.0:2401
I1123 21:53:59.306259 8174 main.go:386] Found network config - Backend type: host-gw
I1123 21:53:59.309982 8174 local_manager.go:201] Found previously leased subnet (172.7.21.0/24), reusing
I1123 21:53:59.312191 8174 local_manager.go:220] Allocated lease (172.7.21.0/24) to current node (10.4.7.21)
I1123 21:53:59.312442 8174 main.go:317] Wrote subnet file to ./subnet.env
I1123 21:53:59.312449 8174 main.go:321] Running backend.
I1123 21:53:59.312717 8174 route_network.go:53] Watching for new subnet leases
I1123 21:53:59.314605 8174 main.go:429] Waiting for 22h59m59.994825456s to renew lease
I1123 21:53:59.315253 8174 iptables.go:145] Some iptables rules are missing; deleting and recreating rules
I1123 21:53:59.315274 8174 iptables.go:167] Deleting iptables rule: -s 172.7.0.0/16 -j ACCEPT
I1123 21:53:59.316551 8174 iptables.go:167] Deleting iptables rule: -d 172.7.0.0/16 -j ACCEPT
I1123 21:53:59.318336 8174 iptables.go:155] Adding iptables rule: -s 172.7.0.0/16 -j ACCEPT
I1123 21:53:59.327024 8174 iptables.go:155] Adding iptables rule: -d 172.7.0.0/16 -j ACCEPT
2.1.11. Install, deploy, start and check on all planned cluster nodes
- The other nodes are essentially the same as hdss7-21; just adjust the following files:
- subnet.env
- flanneld.sh
- /etc/supervisord.d/flannel.ini
2.1.12. Verify the cluster again: pod-to-pod connectivity
[root@hdss7-22 flannel]# ping 172.7.21.2
PING 172.7.21.2 (172.7.21.2) 56(84) bytes of data.
64 bytes from 172.7.21.2: icmp_seq=1 ttl=63 time=0.554 ms
64 bytes from 172.7.21.2: icmp_seq=2 ttl=63 time=0.485 ms
[root@hdss7-21 flannel]# ping 172.7.22.2
PING 172.7.22.2 (172.7.22.2) 56(84) bytes of data.
64 bytes from 172.7.22.2: icmp_seq=1 ttl=63 time=0.271 ms
64 bytes from 172.7.22.2: icmp_seq=2 ttl=63 time=0.196 ms
2.1.13. Optimize the iptables rules on each compute node
2.1.13.1. Edit and apply nginx-ds.yaml
On hdss7-21
[root@hdss7-21 ~]# vi nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: nginx-ds
spec:
template:
metadata:
labels:
app: nginx-ds
spec:
containers:
- name: my-nginx
image: harbor.od.com/public/nginx:curl
ports:
- containerPort: 80
[root@hdss7-21 ~]# kubectl apply -f nginx-ds.yaml
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
daemonset.extensions/nginx-ds configured
2.1.13.2. Restart the pods to load the nginx:curl image
[root@hdss7-21 ~]# kubectl get pods -n default
NAME READY STATUS RESTARTS AGE
nginx-ds-mcvxt 1/1 Running 1 6d22h
nginx-ds-zsnz9 1/1 Running 1 6d22h
[root@hdss7-21 ~]# kubectl delete pod nginx-ds-mcvxt
pod "nginx-ds-mcvxt" deleted
[root@hdss7-21 ~]# kubectl delete pod nginx-ds-zsnz9
pod "nginx-ds-zsnz9" deleted
[root@hdss7-21 ~]# kubectl get pods -n default -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-d5kl8 1/1 Running 0 44s 172.7.22.2 hdss7-22.host.com <none> <none>
nginx-ds-jtn62 1/1 Running 0 56s 172.7.21.2 hdss7-21.host.com <none> <none>
2.1.13.3. Enter the pod on 172.7.21.2 and curl the pod on hdss7-22
[root@hdss7-21 ~]# kubectl exec nginx-ds-jtn62 /bin/bash
[root@hdss7-21 ~]# kubectl exec -ti nginx-ds-jtn62 /bin/bash
root@nginx-ds-jtn62:/#
root@nginx-ds-jtn62:/#
root@nginx-ds-jtn62:/#
root@nginx-ds-jtn62:/# curl 172.7.22.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
2.1.13.4. Check the nginx access log on hdss7-22
[root@hdss7-22 flannel]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-d5kl8 1/1 Running 0 5m24s 172.7.22.2 hdss7-22.host.com <none> <none>
nginx-ds-jtn62 1/1 Running 0 5m36s 172.7.21.2 hdss7-21.host.com <none> <none>
[root@hdss7-22 flannel]#
[root@hdss7-22 flannel]#
[root@hdss7-22 flannel]#
[root@hdss7-22 flannel]#
[root@hdss7-22 flannel]# kubectl logs -f nginx-ds-d5kl8
10.4.7.21 - - [23/Nov/2019:16:57:45 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
10.4.7.21 - - [23/Nov/2019:17:01:37 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
//The recorded client IP is 10.4.7.21, the physical host's IP, which it should not be: SNAT was applied, whereas we want to see the container's real IP
2.1.13.5. Install iptables-services and set the rules
Note: the iptables rules differ slightly on the other node; adjust them accordingly when running on other compute nodes
- Install iptables-services and enable it at boot
[root@hdss7-21 ~]# yum install iptables-services -y
[root@hdss7-21 ~]# systemctl start iptables
[root@hdss7-21 ~]# systemctl enable iptables
- Optimize the SNAT rule so that pod-to-pod traffic between compute nodes is no longer masqueraded
[root@hdss7-21 ~]# iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# iptables-save |grep -i postrouting
- Also remove the default REJECT rules from the filter table:
iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
########## Rule explained ##########
On host 10.4.7.21: packets sourced from the 172.7.21.0/24 docker network, whose destination is NOT in 172.7.0.0/16, and which do not leave via the docker0 bridge, are the only ones that get SNAT'd
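On hdss7-22 the corresponding commands would reference its own pod subnet (a sketch):
iptables -t nat -D POSTROUTING -s 172.7.22.0/24 ! -o docker0 -j MASQUERADE
iptables -t nat -I POSTROUTING -s 172.7.22.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE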
2.1.14. Save the iptables rules on each compute node
- Save the rules on every compute node:
~]# service iptables save
iptables: Saving firewall rules to /etc/sysconfig/iptables:[ OK ]
- Curl the other node's pod from each side and check the nginx access logs; the client IPs shown are now the container IPs
[root@hdss7-21 ~]# kubectl logs -f nginx-ds-jtn62
172.7.22.2 - - [23/Nov/2019:17:46:48 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
[root@hdss7-22 ~]# kubectl logs -f nginx-ds-d5kl8
10.4.7.21 - - [23/Nov/2019:17:01:37 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
172.7.21.2 - - [23/Nov/2019:17:43:34 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.38.0" "-"
2.1.15. How it works
The flannel host-gw model
Note: the prerequisite for this model is that all hosts are on the same layer-2 network, i.e. they point at the same gateway device; it is also the most efficient model
[root@hdss7-21 ~]# route add -net 172.7.22.0/24 gw 10.4.7.22 dev eth0
[root@hdss7-22 ~]# route add -net 172.7.21.0/24 gw 10.4.7.21 dev eth0
[root@hdss7-21 flannel]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.4.7.254 0.0.0.0 UG 100 0 0 eth0
10.4.7.0 0.0.0.0 255.255.255.0 U 100 0 0 eth0
172.7.21.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
172.7.22.0 10.4.7.22 255.255.255.0 UG 0 0 0 eth0
[root@hdss7-22 flannel]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.4.7.254 0.0.0.0 UG 100 0 0 eth0
10.4.7.0 0.0.0.0 255.255.255.0 U 100 0 0 eth0
172.7.21.0 10.4.7.21 255.255.255.0 UG 0 0 0 eth0
172.7.22.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
Note: one more iptables rule also needs to be added:
~]# iptables -t filter -I FORWARD -d 172.7.21.0/24 -j ACCEPT
2.1.16. The flannel VxLAN model
How to switch:
1. Stop flanneld first, via supervisorctl stop flanneld-7-[21,22]
2. Delete the routes that the host-gw model created
route del -net 172.7.21.0/24 gw 10.4.7.21 (on hdss7-22)
route del -net 172.7.22.0/24 gw 10.4.7.22 (on hdss7-21)
3. Modify the config on an etcd node
./etcdctl get /coreos.com/network/config
./etcdctl rm /coreos.com/network/config
etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN"}}'
4. supervisorctl start flanneld-7-21
supervisorctl start flanneld-7-22
5. ifconfig now shows an extra flannel.1 device, and route -n shows no per-host routes
2.1.17. The flannel direct-routing model (automatic selection)
Similar in spirit to MySQL's mixed binlog mode: it uses host-gw between hosts on the same layer-2 network and falls back to VxLAN otherwise
'{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN","Directrouting": true}}'
2.2. The K8s service-discovery plugin - CoreDNS
The add-on that implements DNS inside k8s
- kube-dns: kubernetes v1.2 through v1.10
- CoreDNS: kubernetes v1.11 onwards
Note: DNS inside k8s is not a silver bullet! It should only be responsible for automatically maintaining the mapping "service name" --> "cluster IP"
2.2.1. Serve the k8s resource manifests on the internal network
Note: on the ops host hdss7-200, configure an nginx virtual host to provide a single unified access point for the k8s resource manifests
2.2.1.1. Configure nginx
[root@hdss7-200 html]# vi /etc/nginx/conf.d/k8s-yaml.od.com.conf
server {
listen 80;
server_name k8s-yaml.od.com;
location / {
autoindex on;
default_type text/plain;
root /data/k8s-yaml;
}
}
[root@hdss7-200 html]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@hdss7-200 html]# nginx -s reload
Create the yaml directory and the coredns subdirectory
[root@hdss7-200 data]# mkdir /data/k8s-yaml
[root@hdss7-200 data]# cd k8s-yaml/
[root@hdss7-200 k8s-yaml]# mkdir coredns
2.2.1.2. Configure DNS resolution
[root@hdss7-11 ~]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2019111003 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200
[root@hdss7-11 ~]# systemctl restart named
[root@hdss7-11 ~]# dig -t A k8s-yaml.od.com @10.4.7.11 +short
10.4.7.200
2.2.1.3. Open k8s-yaml.od.com in a browser
All the directories and yaml files can be seen there
2.2.2. Deploy coredns
Strongly recommended: instructor Huang's article on how kubernetes internal DNS resolution works, its drawbacks, and how to optimize it
2.2.2.1. Pull the docker image, tag it, and push it to the harbor registry
[root@hdss7-200 ~]# docker pull coredns/coredns:1.6.1
[root@hdss7-200 coredns]# docker tag c0f6e815079e harbor.od.com/public/coredns:v1.6.1
[root@hdss7-200 coredns]# docker push harbor.od.com/public/coredns:v1.6.1
2.2.2.2. Prepare the resource manifests
[https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.base]
rbac.yaml
[root@hdss7-200 coredns]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
cm.yaml
[root@hdss7-200 coredns]# vi cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
log
health
ready
kubernetes cluster.local 192.168.0.0/16
forward . 10.4.7.11
cache 30
loop
reload
loadbalance
}
dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: coredns
template:
metadata:
labels:
k8s-app: coredns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
containers:
- name: coredns
image: harbor.od.com/public/coredns:v1.6.1
args:
- -conf
- /etc/coredns/Corefile
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
svc.yaml
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: coredns
clusterIP: 192.168.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
- name: metrics
port: 9153
protocol: TCP
2.2.2.3. Apply the resource manifests
Apply them on any compute node
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/cm.yaml
configmap/coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/dp.yaml
deployment.apps/coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/svc.yaml
service/coredns created
2.2.2.4. View the created resources
[root@hdss7-21 ~]# kubectl get all -n kube-system
NAME READY STATUS RESTARTS AGE
pod/coredns-6b6c4f9648-wrrbt 1/1 Running 0 111s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/coredns ClusterIP 192.168.0.2 <none> 53/UDP,53/TCP,9153/TCP 99s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/coredns 1/1 1 1 111s
NAME DESIRED CURRENT READY AGE
replicaset.apps/coredns-6b6c4f9648 1 1 1 111s
Detailed view:
[root@hdss7-21 ~]# kubectl get all -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/coredns-6b6c4f9648-wrrbt 1/1 Running 0 4m56s 172.7.21.4 hdss7-21.host.com <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/coredns ClusterIP 192.168.0.2 <none> 53/UDP,53/TCP,9153/TCP 4m44s k8s-app=coredns
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/coredns 1/1 1 1 4m56s coredns harbor.od.com/public/coredns:v1.6.1 k8s-app=coredns
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/coredns-6b6c4f9648 1 1 1 4m56s coredns harbor.od.com/public/coredns:v1.6.1 k8s-app=coredns,pod-template-hash=6b6c4f9648
2.2.2.5. Verify coredns
[root@hdss7-21 ~]# dig -t A www.baidu.com @192.168.0.2 +short
www.a.shifen.com.
39.156.66.18
39.156.66.14
[root@hdss7-21 ~]# dig -t A hdss7-21.host.com @192.168.0.2 +short
10.4.7.21 //the self-built DNS is coredns's upstream, so this record can be resolved
[root@hdss7-21 ~]# kubectl get svc -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 7d <none>
[root@hdss7-21 ~]#
[root@hdss7-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-ggsn2 1/1 Running 0 7h23m
nginx-dp-5dfc689474-hw6vm 1/1 Running 0 7h8m
View:
[root@hdss7-21 ~]# kubectl expose deployment nginx-dp --port=80 -n kube-public
[root@hdss7-21 ~]# kubectl get svc -o wide -n kube-public
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
nginx-dp ClusterIP 192.168.95.151 <none> 80/TCP 7h21m app=nginx-dp
Verify:
[root@hdss7-21 ~]# dig -t A nginx-dp @192.168.0.2 +short
[root@hdss7-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local. @192.168.0.2 +short
192.168.95.151
Verify from a host
View:
[root@hdss7-21 ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ds-d5kl8 1/1 Running 0 120m 172.7.22.2 hdss7-22.host.com <none> <none>
nginx-ds-jtn62 1/1 Running 0 120m 172.7.21.2 hdss7-21.host.com <none> <none>
Enter a container on that host:
[root@hdss7-21 ~]# kubectl exec -ti nginx-ds-jtn62 /bin/bash
root@nginx-ds-jtn62:/#
Verify:
root@nginx-ds-jtn62:/# curl 192.168.95.151
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
root@nginx-ds-jtn62:/# curl nginx-dp.kube-public
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
Why is the FQDN not needed inside the container?
Reason:
root@nginx-ds-jtn62:/# cat /etc/resolv.conf
nameserver 192.168.0.2
search default.svc.cluster.local svc.cluster.local cluster.local host.com
options ndots:5 //names with fewer than this many dots are tried against the search domains first; the default of 5 is inefficient, see instructor Huang's article
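To illustrate what ndots:5 does (resolver behaviour, not captured output): a name with fewer than 5 dots, such as nginx-dp.kube-public, is first tried with each search suffix appended, so the lookups happen roughly in this order:
nginx-dp.kube-public.default.svc.cluster.local    (NXDOMAIN)
nginx-dp.kube-public.svc.cluster.local            (answers 192.168.95.151, stop here)
nginx-dp.kube-public.cluster.local
nginx-dp.kube-public.host.com
nginx-dp.kube-public
That is why curl nginx-dp.kube-public works inside the pod without the FQDN, at the cost of extra queries for every external name.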
2.3. The K8s service-exposure plugin - Traefik
Background: at this point names cannot be resolved from outside the cluster; coredns only resolves for the cluster itself
[root@hdss7-21 ~]# curl nginx-dp.kube-public.svc.cluster.local.
curl: (6) Could not resolve host: nginx-dp.kube-public.svc.cluster.local.; Unknown error
Motivation: the example above shows that k8s DNS gives services automatic discovery inside the cluster; how can a service be used and accessed from outside the cluster?
2.3.1. NodePort
Note: this cannot be used with kube-proxy's ipvs model, only the iptables model, and the scheduling algorithm only supports RR (round-robin).
2.3.1.1. Modify the nginx-ds service manifest
2.3.1.2. Recreate the nginx-ds service
2.3.1.3. View the service
2.3.1.4. Access it from a browser
Omitted for now, to be updated later; a rough sketch of the manifest follows below.
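Until then, a minimal sketch of what the nginx-ds NodePort service could look like (the nodePort value is an assumption for illustration and must fall inside the --service-node-port-range configured earlier):
apiVersion: v1
kind: Service
metadata:
  name: nginx-ds
  namespace: default
spec:
  type: NodePort
  selector:
    app: nginx-ds
  ports:
  - port: 80          # ClusterIP port
    targetPort: 80    # container port
    nodePort: 8000    # assumed value, opened on every node
Once applied, the pods would be reachable from outside at http://<node-ip>:8000.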
2.3.2. Deploy traefik (an ingress controller)
Notes:
- Ingress can only schedule and expose layer-7 applications, specifically the HTTP and HTTPS protocols
- Ingress is one of the standard k8s API resource types and a core resource; it is essentially a set of rules, based on host names and URL paths, that forward user requests to a specified Service
- It forwards request traffic from outside the cluster to the inside, thereby exposing services externally
- An Ingress controller is the component that listens on a socket for Ingress resources and routes and schedules traffic according to the Ingress rule-matching mechanism
- Frankly, there is nothing mysterious about Ingress: it is basically nginx plus a bit of Go code
2.3.2.1. Prepare the traefik image, tag it, and push it to the harbor registry
On the ops host hdss7-200
[root@hdss7-200 k8s-yaml]# docker pull traefik:v1.7.2-alpine
[root@hdss7-200 k8s-yaml]# docker images|grep traefik
traefik v1.7.2-alpine add5fac61ae5 13 months ago 72.4MB
[root@hdss7-200 k8s-yaml]# docker tag add5fac61ae5 harbor.od.com/public/traefik:v1.7.2
[root@hdss7-200 k8s-yaml]# docker push harbor.od.com/public/traefik:v1.7.2
The push refers to repository [harbor.od.com/public/traefik]
a02beb48577f: Pushed
ca22117205f4: Pushed
3563c211d861: Pushed
df64d3292fd6: Pushed
v1.7.2: digest: sha256:6115155b261707b642341b065cd3fac2b546559ba035d0262650b3b3bbdd10ea size: 1157
2.3.2.2. Prepare the resource manifests
On the ops host hdss7-200
Based on the official yaml files
rbac.yaml
[root@hdss7-200 k8s-yaml]# mkdir traefik
[root@hdss7-200 k8s-yaml]# cd traefik/
[root@hdss7-200 traefik]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
ds.yaml
[root@hdss7-200 traefik]# vi ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: traefik-ingress
namespace: kube-system
labels:
k8s-app: traefik-ingress
spec:
template:
metadata:
labels:
k8s-app: traefik-ingress
name: traefik-ingress
spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: harbor.od.com/public/traefik:v1.7.2
name: traefik-ingress
ports:
- name: controller
containerPort: 80
hostPort: 81
- name: admin-web
containerPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --api
- --kubernetes
- --logLevel=INFO
- --insecureskipverify=true
- --kubernetes.endpoint=https://10.4.7.10:7443
- --accesslog
- --accesslog.filepath=/var/log/traefik_access.log
- --traefiklog
- --traefiklog.filepath=/var/log/traefik.log
- --metrics.prometheus
svc.yaml
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress
ports:
- protocol: TCP
port: 80
name: controller
- protocol: TCP
port: 8080
name: admin-web
ingress.yaml
[root@hdss7-200 traefik]# vi ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: traefik-web-ui
namespace: kube-system
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: traefik.od.com
http:
paths:
- path: /
backend:
serviceName: traefik-ingress-service
servicePort: 8080
2.3.2.3. Apply the resource manifests
On any compute node
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/rbac.yaml
serviceaccount/traefik-ingress-controller created
clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ds.yaml
daemonset.extensions/traefik-ingress created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/svc.yaml
service/traefik-ingress-service created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ingress.yaml
ingress.extensions/traefik-web-ui created
2.3.2.4. Check the created resources
[root@hdss7-21 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-wrrbt 1/1 Running 0 108m
traefik-ingress-9z6wd 1/1 Running 0 10m
traefik-ingress-ksznv 1/1 Running 0 10m
Error encountered:
[root@hdss7-21 ~]# kubectl describe pods traefik-ingress-ksznv -n kube-system
Warning FailedCreatePodSandBox 6m23s kubelet, hdss7-21.host.com Failed create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod "traefik-ingress-ksznv": Error response from daemon: driver failed programming external connectivity on endpoint k8s_POD_traefik-ingress-ksznv_kube-system_d1389546-d27b-47cd-92c1-f5a8963043fd_0 (2f032861a4eb0e5240554e388b8ae8a5efd9ead3c56e50840aacdf43570c434b): (iptables failed: iptables --wait -t filter -A DOCKER ! -i docker0 -o docker0 -p tcp -d 172.7.21.5 --dport 80 -j ACCEPT: iptables: No chain/target/match by that name.
Fix:
systemctl restart docker.service
2.3.3. Add the DNS record
[root@hdss7-11 ~]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2019111004 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200
traefik A 10.4.7.10
[root@hdss7-11 ~]# systemctl restart named
2.3.4. Configure the reverse proxy
Note: both hdss7-11 and hdss7-12 must be configured
[root@hdss7-11 ~]# vi /etc/nginx/conf.d/od.com.conf
upstream default_backend_traefik {
server 10.4.7.21:81 max_fails=3 fail_timeout=10s;
server 10.4.7.22:81 max_fails=3 fail_timeout=10s;
}
server {
server_name *.od.com;
location / {
proxy_pass http://default_backend_traefik;
proxy_set_header Host $http_host;
proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
}
}
[root@hdss7-11 ~]# nginx -t
[root@hdss7-11 ~]# nginx -s reload
2.3.5. Access it from a browser
2.4. The K8s GUI resource-management plugin - Dashboard
2.4.1. Deploy kubernetes-dashboard
2.4.1.1. Prepare the dashboard image
[root@hdss7-200 harbor]# docker pull k8scn/kubernetes-dashboard-amd64:v1.8.3
v1.8.3: Pulling from k8scn/kubernetes-dashboard-amd64
a4026007c47e: Pull complete
Digest: sha256:ebc993303f8a42c301592639770bd1944d80c88be8036e2d4d0aa116148264ff
Status: Downloaded newer image for k8scn/kubernetes-dashboard-amd64:v1.8.3
docker.io/k8scn/kubernetes-dashboard-amd64:v1.8.3
[root@hdss7-200 harbor]# docker images|grep dashboard
k8scn/kubernetes-dashboard-amd64 v1.8.3 fcac9aa03fd6 18 months ago 102MB
[root@hdss7-200 harbor]# docker tag fcac9aa03fd6 harbor.od.com/public/dashboard:v1.8.3
[root@hdss7-200 harbor]# docker push harbor.od.com/public/dashboard:v1.8.3
The push refers to repository [harbor.od.com/public/dashboard.od.com]
23ddb8cbb75a: Pushed
v1.8.3: digest: sha256:ebc993303f8a42c301592639770bd1944d80c88be8036e2d4d0aa116148264ff size: 529
2.4.1.2. Create the resource manifests
Source of the resource manifests
On the ops host hdss7-200
[root@hdss7-200 harbor]# mkdir -p /data/k8s-yaml/dashboard && cd /data/k8s-yaml/dashboard
[root@hdss7-200 dashboard]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
name: kubernetes-dashboard-admin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard-admin
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard-admin
namespace: kube-system
[root@hdss7-200 dashboard]# vi dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- name: kubernetes-dashboard
image: harbor.od.com/public/dashboard:v1.8.3
resources:
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 50m
memory: 100Mi
ports:
- containerPort: 8443
protocol: TCP
args:
# PLATFORM-SPECIFIC ARGS HERE
- --auto-generate-certificates
volumeMounts:
- name: tmp-volume
mountPath: /tmp
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard-admin
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
[root@hdss7-200 dashboard]# vi svc.yaml
apiVersion: v1
kind: Service
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
k8s-app: kubernetes-dashboard
ports:
- port: 443
targetPort: 8443
[root@hdss7-200 dashboard]# vi ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: kubernetes-dashboard
namespace: kube-system
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: dashboard.od.com
http:
paths:
- backend:
serviceName: kubernetes-dashboard
servicePort: 443
2.4.1.3. Apply the resource manifests
[root@hdss7-21 containers]# kubectl apply -f http://k8s-yaml.od.com/dashboard/rbac.yaml
serviceaccount/kubernetes-dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-admin created
[root@hdss7-21 containers]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dp.yaml
deployment.apps/kubernetes-dashboard created
[root@hdss7-21 containers]# kubectl apply -f http://k8s-yaml.od.com/dashboard/svc.yaml
service/kubernetes-dashboard created
[root@hdss7-21 containers]# kubectl apply -f http://k8s-yaml.od.com/dashboard/ingress.yaml
ingress.extensions/kubernetes-dashboard created
2.4.1.4. View the created resources
[root@hdss7-21 containers]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-wrrbt 1/1 Running 0 6d8h
kubernetes-dashboard-76dcdb4677-t4swp 0/1 ImagePullBackOff 0 10m
traefik-ingress-jsrcs 1/1 Running 0 24h
traefik-ingress-v4qxh 1/1 Running 0 24h
[root@hdss7-21 containers]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
coredns ClusterIP 192.168.0.2 <none> 53/UDP,53/TCP,9153/TCP 6d8h
kubernetes-dashboard ClusterIP 192.168.134.43 <none> 443/TCP 10m
traefik-ingress-service ClusterIP 192.168.130.180 <none> 80/TCP,8080/TCP 6d6h
[root@hdss7-21 containers]# kubectl get ingress -n kube-system
NAME HOSTS ADDRESS PORTS AGE
kubernetes-dashboard dashboard.od.com 80 11m
traefik-web-ui traefik.od.com 80 6d6h
2.4.2. Add the DNS record
On hdss7-11
[root@hdss7-11 conf.d]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2019111005 ; serial //bump the serial number forward
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.4.7.11
harbor A 10.4.7.200
k8s-yaml A 10.4.7.200
traefik A 10.4.7.10
dashboard A 10.4.7.10
[root@hdss7-11 conf.d]# systemctl restart named
[root@hdss7-11 conf.d]# dig -t A dashboard.od.com @10.4.7.11 +short
10.4.7.10
[root@hdss7-21 containers]# dig -t A dashboard.od.com @192.168.0.2 +short
10.4.7.10
Note: in production it is not recommended to restart named directly; reload it with rndc instead.
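A sketch of the gentler alternative (assuming the zone name matches the file):
~]# rndc reload od.com
This reloads just the od.com zone without interrupting the named daemon.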
2.4.3. Access it from a browser
Note: dashboard v1.8.3 allows the login to be skipped outright; to log in with a token you need to upgrade to a newer version and use HTTPS — as shown in the screenshot above, the connection is reported as not secure
//Getting the token from the command line:
[root@hdss7-21 conf]# kubectl get secret -n kube-system
NAME TYPE DATA AGE
coredns-token-mhstl kubernetes.io/service-account-token 3 6d10h
default-token-ntmvw kubernetes.io/service-account-token 3 13d
kubernetes-dashboard-admin-token-ws4ck kubernetes.io/service-account-token 3 137m
kubernetes-dashboard-key-holder Opaque 2 94m
traefik-ingress-controller-token-55b2f kubernetes.io/service-account-token 3 6d9h
conf]# kubectl describe secret kubernetes-dashboard-admin-token-ws4ck -n kube-system
2.4.4. Configure authentication
2.4.4.1. Sign a certificate with openssl (optional)
Step 1: create the private key for the dashboard.od.com site
[root@hdss7-200 certs]# (umask 077; openssl genrsa -out dashboard.od.com.key 2048)
Generating RSA private key, 2048 bit long modulus
....................+++
........+++
e is 65537 (0x10001)
Step 2: use openssl to create the certificate signing request
[root@hdss7-200 certs]# openssl req -new -key dashboard.od.com.key -out dashboard.od.com.csr -subj "/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops"
[root@hdss7-200 certs]#ls -l
-rw------- 1 root root 1675 Nov 30 13:18 dashboard.od.com.key
-rw-r--r-- 1 root root 1005 Nov 30 13:28 dashboard.od.com.csr
Step 3: sign the certificate with x509
[root@hdss7-200 certs]# openssl x509 -req -in dashboard.od.com.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out dashboard.od.com.crt -days 3650
Signature ok
subject=/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops
Getting CA Private Key
[root@hdss7-200 certs]#ls -l
-rw-r--r-- 1 root root 1196 Nov 30 13:36 dashboard.od.com.crt
-rw------- 1 root root 1675 Nov 30 13:18 dashboard.od.com.key
-rw-r--r-- 1 root root 1005 Nov 30 13:28 dashboard.od.com.csr
Step 4: inspect the certificate
[root@hdss7-200 certs]# cfssl-certinfo -cert dashboard.od.com.crt
2.4.4.2. Sign a certificate with cfssl
Step 1: copy an existing json file and change the domain
[root@hdss7-200 certs]# cp client-csr.json od.com-csr.json
[root@hdss7-200 certs]# vi od.com-csr.json
{
"CN": "*.od.com",
"hosts": [
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
Step 2: sign it
[root@hdss7-200 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server od.com-csr.json |cfssl-json -bare od.com
Step 3: view the generated certificates
[root@hdss7-200 certs]# ls -l
-rw-r--r-- 1 root root 993 Nov 30 14:02 od.com.csr
-rw-r--r-- 1 root root 280 Nov 30 13:58 od.com-csr.json
-rw------- 1 root root 1679 Nov 30 14:02 od.com-key.pem
-rw-r--r-- 1 root root 1363 Nov 30 14:02 od.com.pem
2.4.4.3. Copy the certificates
On hdss7-11
[root@hdss7-11 nginx]# ls
conf.d fastcgi.conf fastcgi_params koi-utf mime.types nginx.conf scgi_params uwsgi_params win-utf
default.d fastcgi.conf.default fastcgi_params.default koi-win mime.types.default nginx.conf.default scgi_params.default uwsgi_params.default
[root@hdss7-11 nginx]# mkdir certs
[root@hdss7-11 nginx]# cd certs/
[root@hdss7-11 certs]# scp hdss7-200:/opt/certs/od.com-key.pem .
[root@hdss7-11 certs]# scp hdss7-200:/opt/certs/od.com.pem .
2.4.4.4. Create the nginx configuration
[root@hdss7-11 conf.d]# vi dashboard.od.com.conf
server {
listen 80;
server_name dashboard.od.com;
rewrite ^(.*)$ https://${server_name}$1 permanent;
}
server {
listen 443 ssl;
server_name dashboard.od.com;
ssl_certificate "certs/od.com.pem";
ssl_certificate_key "certs/od.com-key.pem";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location / {
proxy_pass http://default_backend_traefik;
proxy_set_header Host $http_host;
proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
}
}
[root@hdss7-11 nginx]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@hdss7-11 nginx]# nginx -s reload
2.4.4.5. Access it from a browser
2.4.4.5.1. Access from a Windows browser
2.4.4.5.2. Export the CA certificate to the Windows desktop
Since the certificate is self-signed, the CA must be imported into the local browser
[root@hdss7-200 certs]# sz ca.pem
2.4.4.5.3. On Windows, change the extension to .crt and install it
Note: this method no longer works in newer browsers
2.4.4.6. Pick a token and test the login
[root@hdss7-21 conf]# kubectl get secret -n kube-system
NAME TYPE DATA AGE
coredns-token-mhstl kubernetes.io/service-account-token 3 6d12h
default-token-ntmvw kubernetes.io/service-account-token 3 13d
kubernetes-dashboard-admin-token-ws4ck kubernetes.io/service-account-token 3 4h9m
kubernetes-dashboard-key-holder Opaque 2 3h25m
traefik-ingress-controller-token-55b2f kubernetes.io/service-account-token 3 6d10h
[root@hdss7-21 conf]# kubectl describe secret kubernetes-dashboard-admin-token-ws4ck -n kube-system
Name: kubernetes-dashboard-admin-token-ws4ck
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: kubernetes-dashboard-admin
kubernetes.io/service-account.uid: 80808715-32d9-41b1-bd78-7ed7ab3af849
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1346 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi10b2tlbi13czRjayIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjgwODA4NzE1LTMyZDktNDFiMS1iZDc4LTdlZDdhYjNhZjg0OSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiJ9.xfboNXWurS7FEEOstO85MRElasKlvy-gapGLLHPJYHWjPi03gl5OAWXvDQuDJ9vXBrY33jsCkuCj0BgTFMKgXFuQANAaQ3pmg8Vs5_ViW19n4z5QI0E8jfV0rV_vqEz-lc5oXEHtnfGMkkkdr7PkVlZI4PpZgAE6oLjFAoKcmYgTy8Q32EYZf1VmhneB_OdHIw_bh_L1M_HRo9q3bSWESOWWVS68tmW0ZBHphd-Ntt5XqgkJygTYgKEtY-K8DtE_8anJOT0c4hvlc1PTwp1xmbyKwvJgxMuEXiTnPndgHA5rq-8LwuXs8pDc3llRDYVfCutr4ik9KqUSP-Md7Txfow
Paste the token to log in
2.4.5. Deploy heapster (officially being deprecated)
2.4.5.1. Prepare the heapster image (pulling it may require a proxy)
[root@hdss7-200 certs]# docker pull quay.io/bitnami/heapster:1.5.4
[root@hdss7-200 src]# docker tag c359b95ad38b harbor.od.com/public/heapster:v1.5.4
[root@hdss7-200 src]# docker push harbor.od.com/public/heapster:v1.5.4
2.4.5.2. Prepare the resource manifests
On hdss7-200
[root@hdss7-200 k8s-yaml]# mkdir -p /data/k8s-yaml/dashboard/heapster
[root@hdss7-200 k8s-yaml]# cd dashboard/heapster/
[root@hdss7-200 heapster]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: heapster
namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: heapster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:heapster
subjects:
- kind: ServiceAccount
name: heapster
namespace: kube-system
[root@hdss7-200 heapster]# vi dp.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
task: monitoring
k8s-app: heapster
spec:
serviceAccountName: heapster
containers:
- name: heapster
image: harbor.od.com/public/heapster:v1.5.4
imagePullPolicy: IfNotPresent
command:
- /opt/bitnami/heapster/bin/heapster
- --source=kubernetes:https://kubernetes.default
[root@hdss7-200 heapster]# vi svc.yaml
apiVersion: v1
kind: Service
metadata:
labels:
task: monitoring
# For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
# If you are NOT using this as an addon, you should comment out this line.
kubernetes.io/cluster-service: 'true'
kubernetes.io/name: Heapster
name: heapster
namespace: kube-system
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster
2.4.5.3. Apply the resource manifests
On any compute node
[root@hdss7-21 conf]# kubectl apply -f http://k8s-yaml.od.com/dashboard/heapster/rbac.yaml
serviceaccount/heapster created
clusterrolebinding.rbac.authorization.k8s.io/heapster created
[root@hdss7-21 conf]# kubectl apply -f http://k8s-yaml.od.com/dashboard/heapster/dp.yaml
deployment.extensions/heapster created
[root@hdss7-21 conf]# kubectl apply -f http://k8s-yaml.od.com/dashboard/heapster/svc.yaml
service/heapster created
View:
[root@hdss7-21 conf]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-7mr4w 1/1 Running 0 3h55m
heapster-b5b9f794-gz6mf 1/1 Running 0 68s
kubernetes-dashboard-76dcdb4677-kncnz 1/1 Running 0 3h58m
traefik-ingress-jsrcs 1/1 Running 0 29h
traefik-ingress-v4qxh 1/1 Running 0 29h
2.4.5.4. Restart the dashboard (the charts are for reference only)
3. Smooth K8s Cluster Rollback or Upgrade
Note: in production, plan the upgrade window around the business; hdss7-21 is used as the example here
3.1. Current environment
The cluster is currently at v1.15.2 and we are upgrading it to v1.15.4
[root@hdss7-21 conf]# kubectl get node
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready master,node 13d v1.15.2
hdss7-22.host.com Ready master,node 13d v1.15.2
3.2. Take the node to be upgraded out of service
Modify nginx.conf on the load balancers and comment this node out of the upstream (details omitted here; a sketch follows below)
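A sketch of what that edit might look like (the upstream name and options are assumptions for illustration; the idea is simply to take 10.4.7.21 out of rotation on the hdss7-11/12 load balancers):
upstream kube-apiserver {
    # server 10.4.7.21:6443 max_fails=3 fail_timeout=30s;   # commented out for the upgrade
    server 10.4.7.22:6443 max_fails=3 fail_timeout=30s;
}
followed by nginx -t and nginx -s reload.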
Before deleting the node, two nodes are present and pods are scheduled across both 21 and 22
[root@hdss7-21 conf]# kubectl get node
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready master,node 13d v1.15.2
hdss7-22.host.com Ready master,node 13d v1.15.2
[root@hdss7-21 conf]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6b6c4f9648-7mr4w 1/1 Running 0 4h7m 172.7.22.6 hdss7-22.host.com <none> <none>
heapster-b5b9f794-gz6mf 1/1 Running 0 13m 172.7.21.4 hdss7-21.host.com <none> <none>
kubernetes-dashboard-76dcdb4677-kncnz 1/1 Running 0 4h11m 172.7.22.5 hdss7-22.host.com <none> <none>
traefik-ingress-jsrcs 1/1 Running 0 29h 172.7.21.5 hdss7-21.host.com <none> <none>
traefik-ingress-v4qxh 1/1 Running 0 29h 172.7.22.4 hdss7-22.host.com <none> <none>
After deleting the node only one node remains, and all the pods have been rescheduled onto hdss7-22
[root@hdss7-21 conf]# kubectl delete node hdss7-21.host.com
node "hdss7-21.host.com" deleted
[root@hdss7-21 conf]# kubectl get node
NAME STATUS ROLES AGE VERSION
hdss7-22.host.com Ready master,node 13d v1.15.2
[root@hdss7-21 conf]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6b6c4f9648-7mr4w 1/1 Running 0 4h8m 172.7.22.6 hdss7-22.host.com <none> <none>
heapster-b5b9f794-h84z9 1/1 Running 0 24s 172.7.22.8 hdss7-22.host.com <none> <none>
kubernetes-dashboard-76dcdb4677-kncnz 1/1 Running 0 4h12m 172.7.22.5 hdss7-22.host.com <none> <none>
traefik-ingress-v4qxh 1/1 Running 0 29h 172.7.22.4 hdss7-22.host.com <none> <none>
[root@hdss7-21 conf]# dig -t A kubernetes.default.svc.cluster.local @192.168.0.2 +short //services inside the cluster are not affected at all
192.168.0.1
3.3. Unpack, rename, create the symlink
Unpack:
[root@hdss7-21 opt]# mkdir 123
[root@hdss7-21 opt]# cd src/
[root@hdss7-21 src]# tar xfv kubernetes-server-linux-amd64-v1.15.4.tar.gz -C /opt/123/
Rename:
[root@hdss7-21 src]# cd ../123/
[root@hdss7-21 123]# mv kubernetes/ ../kubernetes-v1.15.4
[root@hdss7-21 opt]# rm -rf 123/
Create the symlink:
[root@hdss7-21 opt]# ll
lrwxrwxrwx 1 root root 24 Nov 17 01:35 kubernetes -> /opt/kubernetes-v1.15.2/
drwxr-xr-x 4 root root 50 Nov 17 01:37 kubernetes-v1.15.2
drwxr-xr-x 4 root root 79 Sep 18 23:09 kubernetes-v1.15.4
[root@hdss7-21 opt]# rm -f kubernetes
[root@hdss7-21 opt]# ln -s /opt/kubernetes-v1.15.4/ /opt/kubernetes
[root@hdss7-21 opt]# ll
total 4
lrwxrwxrwx 1 root root 24 Nov 17 01:35 kubernetes -> /opt/kubernetes-v1.15.4/
drwxr-xr-x 4 root root 76 Nov 30 16:07 kubernetes-v1.15.2
drwxr-xr-x 4 root root 79 Sep 18 23:09 kubernetes-v1.15.4
drwxr-xr-x 2 root root 4096 Nov 23 21:26 src
Remove unneeded files:
[root@hdss7-21 opt]# cd kubernetes
[root@hdss7-21 kubernetes]# ls
addons kubernetes-src.tar.gz LICENSES server
[root@hdss7-21 kubernetes]# rm -f kubernetes-src.tar.gz
[root@hdss7-21 kubernetes]# cd server/bin/
[root@hdss7-21 bin]# rm -fr *.tar
[root@hdss7-21 bin]# rm -fr *_tag
3.4. Copy the conf files, cert files and shell scripts
[root@hdss7-21 bin]# mkdir conf
[root@hdss7-21 bin]# mkdir cert
[root@hdss7-21 bin]# cp /opt/kubernetes-v1.15.2/server/bin/cert/* ./cert/
[root@hdss7-21 bin]# cp /opt/kubernetes-v1.15.2/server/bin/conf/* ./conf/
[root@hdss7-21 bin]# cp /opt/kubernetes-v1.15.2/server/bin/*.sh .
3.5. Restart the services and check
Note: in production restart the components one at a time (see the sketch below); etcd and flannel do not need to be restarted
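In production that would look roughly like the following (program names as defined in supervisord on this node; in this lab we simply restart everything below):
~]# supervisorctl restart kube-kubelet-7-21
~]# supervisorctl restart kube-proxy-7-21
~]# supervisorctl restart kube-apiserver-7-21
~]# supervisorctl restart kube-controller-manager-7-21
~]# supervisorctl restart kube-scheduler-7-21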
[root@hdss7-21 bin]# supervisorctl restart all
[root@hdss7-21 bin]# supervisorctl status
etcd-server-7-21 RUNNING pid 9595, uptime 0:04:40
flanneld-7-21 RUNNING pid 12236, uptime 0:00:35
kube-apiserver-7-21 RUNNING pid 9655, uptime 0:04:40
kube-controller-manager-7-21 RUNNING pid 9671, uptime 0:04:40
kube-kubelet-7-21 RUNNING pid 11628, uptime 0:01:55
kube-proxy-7-21 RUNNING pid 9691, uptime 0:04:40
kube-scheduler-7-21 RUNNING pid 9706, uptime 0:04:40
[root@hdss7-21 bin]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready <none> 7m26s v1.15.4
hdss7-22.host.com Ready master,node 13d v1.15.2
[root@hdss7-21 bin]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6b6c4f9648-7mr4w 1/1 Running 0 4h46m 172.7.22.6 hdss7-22.host.com <none> <none>
heapster-b5b9f794-h84z9 1/1 Running 0 37m 172.7.22.8 hdss7-22.host.com <none> <none>
kubernetes-dashboard-76dcdb4677-kncnz 1/1 Running 0 4h50m 172.7.22.5 hdss7-22.host.com <none> <none>
traefik-ingress-6jgm6 1/1 Running 0 8m52s 172.7.21.2 hdss7-21.host.com <none> <none>
traefik-ingress-v4qxh 1/1 Running 0 30h 172.7.22.4 hdss7-22.host.com <none> <none>