1. Set the hostname
Use hostnamectl set-hostname to give each host a distinct machine name, for example:
hostnamectl set-hostname k8s-master01
Then append the name-to-IP mappings below to /etc/hosts on every node:
cat >> /etc/hosts << EOF
192.168.80.10 k8s-master01
192.168.80.20 k8s-master02
192.168.80.30 k8s-master03
192.168.80.40 k8s-node01
192.168.80.50 k8s-node02
192.168.80.100 lbvip
EOF
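An optional sanity check: confirm the names just written resolve and answer from every node (the lbvip entry only responds once keepalived is running in step 18):
for h in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
    ping -c 1 -W 1 $h > /dev/null && echo "$h ok" || echo "$h unreachable"
done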
2. Install dependency packages
yum -y install conntrack ntpdate ntp ipvsadm ipset jq iptables curl vim sysstat libseccomp wget lrzsz net-tools git
3. Switch the firewall to iptables and flush the rules
systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
4. Update all installed packages to the latest versions
yum update -y
5. Disable swap and SELinux
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
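A quick check that both changes took effect:
free -m | grep -i swap # the Swap line should show 0 total/used
getenforce             # prints Permissive now; Disabled after the next reboot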
6. Tune kernel parameters for Kubernetes
modprobe br_netfilter
cat > kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
# note: tcp_tw_recycle was removed in kernel 4.12+; drop this line on newer kernels
net.ipv4.tcp_tw_recycle=0
# do not use swap unless the system is out of memory
vm.swappiness=0
# do not check whether enough physical memory is available
vm.overcommit_memory=1
# 0 = do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
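Note that modprobe br_netfilter does not survive a reboot; a small addition (not part of the original steps) lets systemd-modules-load load it at boot:
# load br_netfilter automatically on every boot
cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF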
7. Adjust the time zone
# set the system time zone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# keep the hardware clock in UTC
timedatectl set-local-rtc 0
# restart the services that depend on the system time
systemctl restart rsyslog && systemctl restart crond
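Since ntp and ntpdate were installed in step 2, the clocks can be synchronized now as well; a minimal sketch (ntp1.aliyun.com is just an example server, any reachable NTP source works, and chronyd should be stopped if it is running):
# one-shot sync, then keep time in sync via ntpd
ntpdate ntp1.aliyun.com
systemctl enable --now ntpd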
8. Stop unnecessary services
# needed on CentOS 7; not required on CentOS 8.1
systemctl stop postfix && systemctl disable postfix
9. Configure rsyslogd and systemd-journald
mkdir /var/log/journal # directory where logs are persisted
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf << EOF
[Journal]
# persist logs to disk
Storage=persistent
# compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# maximum disk space the journal may use
SystemMaxUse=10G
# maximum size of a single journal file
SystemMaxFileSize=200M
# keep logs for two weeks
MaxRetentionSec=2week
# do not forward logs to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
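To confirm journald picked up the new settings, the journal's disk usage can be inspected:
journalctl --disk-usage # should stay within the 10G SystemMaxUse cap configured above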
10. Upgrade to a newer kernel
The stock 3.10.x kernel that ships with CentOS 7.x has known bugs that make Docker and Kubernetes
unstable; upgrading to a 4.4.x or newer kernel is recommended.
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
# after installation, check that the new kernel's menuentry in /boot/grub2/grub.cfg contains an
# initrd16 line; if it does not, install again!
yum --enablerepo=elrepo-kernel install -y kernel-lt
# boot from the new kernel by default (adjust the version string to match the kernel actually installed)
grub2-set-default 'CentOS Linux (4.4.215-1.el7.elrepo.x86_64) 7 (Core)'
# reboot the system
reboot
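After the reboot, confirm the machine came up on the new kernel:
uname -r # should print 4.4.x-elrepo or newer, not 3.10.x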
11. Disable NUMA
cp /etc/default/grub{,.bak}
vim /etc/default/grub # add the 'numa=off' parameter to the GRUB_CMDLINE_LINUX line, as shown below:
GRUB_CMDLINE_LINUX="crashkernel=auto spectre_v2=retpoline rhgb quiet numa=off"
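After saving the file, the setting only takes effect once the GRUB configuration is regenerated (the path below assumes a BIOS install; on UEFI systems grub.cfg lives under /boot/efi) and the machine is rebooted, which step 12 does anyway:
grub2-mkconfig -o /boot/grub2/grub.cfg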
12. Prerequisites for enabling IPVS in kube-proxy
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
# on kernels 4.19 and newer the module is named nf_conntrack instead of nf_conntrack_ipv4
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# reboot the system
reboot
13. Install software
# install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
cd /etc/yum.repos.d
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# create the /etc/docker directory
mkdir /etc/docker
# write the following content to /etc/docker/daemon.json:
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "insecure-registries": ["https://hub.mfyxw.com"],
  "registry-mirrors": ["https://jltw059v.mirror.aliyuncs.com"]
}
EOF
# list the available docker-ce versions
yum list docker-ce --showduplicates | sort -r
# install the latest version
yum install -y docker-ce
# or pin a specific version instead
yum install -y docker-ce-18.06.3.ce-3.el7
mkdir -p /etc/systemd/system/docker.service.d
# restart the Docker service
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
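Docker should now report systemd as its cgroup driver, matching what the kubelet expects; a quick check:
docker info 2> /dev/null | grep -i 'cgroup driver' # expect: Cgroup Driver: systemd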
14. Install haproxy and keepalived
yum -y install haproxy keepalived
15. Add the haproxy configuration
# back up the original configuration file before changing it
mv /etc/haproxy/haproxy.cfg{,.bak}
# replace the original configuration with the following
cat > /etc/haproxy/haproxy.cfg << EOF
global
    maxconn 2000
    ulimit-n 16384
    log 127.0.0.1 local0 err
    stats timeout 30s

defaults
    log global
    mode http
    option httplog
    timeout connect 5000
    timeout client 50000
    timeout server 50000
    timeout http-request 15s
    timeout http-keep-alive 15s

frontend monitor-in
    bind *:33305
    mode http
    option httplog
    monitor-uri /monitor

listen stats
    bind *:8006
    mode http
    stats enable
    stats hide-version
    stats uri /stats
    stats refresh 30s
    stats realm Haproxy\ Statistics
    stats auth admin:admin

frontend k8s-master
    # a high port is recommended here to avoid clashing with the Prometheus + Grafana ports
    bind 0.0.0.0:58443
    bind 127.0.0.1:58443
    mode tcp
    option tcplog
    tcp-request inspect-delay 5s
    default_backend k8s-master

backend k8s-master
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    # fill in the hostnames and IP addresses of your actual environment below
    server k8s-master01 192.168.80.10:6443 check
    server k8s-master02 192.168.80.20:6443 check
    server k8s-master03 192.168.80.30:6443 check
EOF
# copy the finished haproxy.cfg into the /etc/haproxy/ directory on each master node
scp -r /etc/haproxy/haproxy.cfg k8s-master01:/etc/haproxy/
scp -r /etc/haproxy/haproxy.cfg k8s-master02:/etc/haproxy/
scp -r /etc/haproxy/haproxy.cfg k8s-master03:/etc/haproxy/
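Before starting haproxy it is worth validating the file syntax on each master with haproxy's built-in check mode:
haproxy -c -f /etc/haproxy/haproxy.cfg # prints "Configuration file is valid" on success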
16. Add the keepalived configuration
# back up the original keepalived configuration
mv /etc/keepalived/keepalived.conf{,.bak}
# paste the following into the shell as-is (keepalived.conf below is already adapted)
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    # the local IP of this node; it must differ on every master node, adjust accordingly
    mcast_src_ip 192.168.80.10
    virtual_router_id 51
    # the highest priority wins the VIP; priorities must differ, adjust on each master node
    priority 102
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        # the VIP address you want to assign
        192.168.80.100
    }
    # track_script {
    #     chk_apiserver
    # }
}
EOF
# copy keepalived.conf to the other two master nodes; after copying, adjust the IP address and priority on each, the VIP stays the same
scp -r /etc/keepalived/keepalived.conf k8s-master02:/etc/keepalived/
scp -r /etc/keepalived/keepalived.conf k8s-master03:/etc/keepalived/
17. Add the keepalived health-check script
# note the quoted 'EOF': it keeps $(pgrep ...) and the variables from expanding while the file is written
cat > /etc/keepalived/check_apiserver.sh << 'EOF'
#!/bin/bash
function check_apiserver() {
    for ((i=0;i<5;i++)); do
        apiserver_job_id=$(pgrep kube-apiserver)
        if [[ -n $apiserver_job_id ]]; then
            return
        else
            sleep 2
        fi
        apiserver_job_id=0
    done
}
# non-zero PID: running  0: stopped
check_apiserver
if [[ $apiserver_job_id -eq 0 ]]; then
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF
# make the check script executable
chmod a+x /etc/keepalived/check_apiserver.sh
# copy the check script to /etc/keepalived on the other master nodes
scp -r /etc/keepalived/check_apiserver.sh k8s-master02:/etc/keepalived/
scp -r /etc/keepalived/check_apiserver.sh k8s-master03:/etc/keepalived/
18. Start the haproxy and keepalived services
systemctl enable --now haproxy && systemctl enable --now keepalived
# check the status of haproxy and keepalived
systemctl status haproxy keepalived
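Once both services are up, the monitor frontend configured in step 15 gives a simple liveness probe, and the VIP should sit on exactly one master; an optional check:
curl -s -o /dev/null -w '%{http_code}\n' http://127.0.0.1:33305/monitor # expect 200
ip addr show ens33 | grep 192.168.80.100 # should only match on the master holding the VIP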
19. Install kubeadm on masters and workers
# every node needs this yum repo (masters and workers alike)
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[Kubernetes]
name=Kubernetes repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# install kubeadm, kubectl and kubelet (on both masters and workers)
yum -y install kubeadm-1.17.1 kubectl-1.17.1 kubelet-1.17.1
# alternatively, install the latest versions
yum -y install kubeadm kubectl kubelet
# enable kubelet at boot (on both masters and workers)
systemctl enable kubelet.service
20. Import the required images
# upload the image bundle and extract it (the required images normally have to be downloaded from behind the firewall, so they were pulled from Alibaba Cloud instead and packaged in advance)
# Master nodes: upload the packaged kubeadm-basic.images.tar.gz to each master node and extract it
tar xf kubeadm-basic.images.tar.gz
cd kubeadm-basic.images
# import the images on the master node with the command below, or use a script
# manual import
docker load -i apiserver_1.17.1.tar && docker load -i etcd_3.4.3-0.tar && docker load -i kube-con-manager_1.17.1.tar && docker load -i proxy_1.17.1.tar && docker load -i coredns_1.6.5.tar && docker load -i flannel.tar && docker load -i pause_3.1.tar && docker load -i scheduler_1.17.1.tar
# Worker nodes: upload the packaged kubeadm-basic.images.tar.gz to each worker node and extract it
tar xf kubeadm-basic.images.tar.gz
cd kubeadm-basic.images
# import the images on the worker node with the command below, or use a script
# manual import
docker load -i coredns_1.6.5.tar && docker load -i flannel.tar && docker load -i pause_3.1.tar && docker load -i proxy_1.17.1.tar
# script-based import (note the quoted 'EOF': it keeps $(...) and $i from expanding while the file is written)
cat > /root/import_image.sh << 'EOF'
#!/bin/bash
ls /root/kubeadm-basic.images > /tmp/images-list.txt
cd /root/kubeadm-basic.images
for i in $( cat /tmp/images-list.txt )
do
    docker load -i $i
done
rm -f /tmp/images-list.txt
EOF
# make the script executable
chmod a+x /root/import_image.sh
# run the script
bash /root/import_image.sh
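Whichever import method was used, the loaded images can be listed to confirm nothing is missing:
docker images | grep -E 'kube|coredns|etcd|flannel|pause'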

21. Generate the k8s init configuration file
# generate the init configuration on the master node that currently holds the VIP
kubeadm config print init-defaults > kubeadm-init.yaml
# the two warnings below may appear and can be ignored (1.17 prints them when generating the default config)
W0309 10:29:42.003091 2724 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0309 10:29:42.004138 2724 validation.go:28] Cannot validate kubelet config - no validator is available
# edit the generated kubeadm-init.yaml as follows
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.80.10 # the IP address of this master host; adjust accordingly
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: lbvip
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.80.100:58443" # add this line if it is missing; fill in your HA VIP address and port
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io # pulls from k8s.gcr.io by default, which requires getting around the firewall; can be switched to the Alibaba Cloud mirror below
#imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers # the Alibaba Cloud mirror address
kind: ClusterConfiguration
kubernetesVersion: v1.15.1 # change this to match the image version you downloaded (v1.17.1 in this guide)
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16 # the pod network; it must match the subnet in the flannel yaml, adjust if they differ
  serviceSubnet: 10.96.0.0/12
scheduler: {}
# append the following after scheduler: {} (enables IPVS; without it kube-proxy falls back to iptables):
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
# tip: if you cannot get around the firewall, switch imageRepository to the Alibaba Cloud mirror and pull from there; check which version you need
If the images were not imported beforehand, they can be pulled first using the config file:
kubeadm config images pull --config kubeadm-init.yaml
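To see exactly which images the config file resolves to before pulling anything, kubeadm can list them first:
kubeadm config images list --config kubeadm-init.yaml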
22. Before initializing k8s, make sure haproxy and keepalived are healthy
# check that haproxy and keepalived are running on the master node that holds the VIP
systemctl status haproxy keepalived
23. Initialize the cluster with the config file
# on the master node that holds the VIP, initialize the cluster with the config file and save the output to a log file
kubeadm init --config kubeadm-init.yaml | tee kubeadm-init.log

Copy the certificates from the primary master node to the other two masters; the shell script below handles the copying
# the following script requires passwordless SSH to the other masters
# note the quoted 'EOF': it keeps ${USER} and ${host} from expanding while the file is written
cat > /root/cert.sh << 'EOF'
#!/bin/bash
USER=root
CONTROL_PLANE_IPS="k8s-master02 k8s-master03"
for host in $CONTROL_PLANE_IPS; do
    ssh "${USER}"@${host} "mkdir -p /etc/kubernetes/pki/etcd"
    scp /etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} "${USER}"@${host}:/etc/kubernetes/pki
    scp /etc/kubernetes/pki/etcd/{ca.key,ca.crt} "${USER}"@${host}:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf "${USER}"@${host}:/etc/kubernetes
done
EOF
chmod a+x /root/cert.sh
bash /root/cert.sh
# if passwordless SSH is not set up, follow the steps below instead
# run this mkdir on both k8s-master02 and k8s-master03
mkdir -pv /etc/kubernetes/pki/etcd
# then, from k8s-master01, copy the certificates to the corresponding locations on the other masters
scp /etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} k8s-master02:/etc/kubernetes/pki
scp /etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} k8s-master03:/etc/kubernetes/pki
scp /etc/kubernetes/pki/etcd/{ca.key,ca.crt} k8s-master02:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/{ca.key,ca.crt} k8s-master03:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/admin.conf k8s-master02:/etc/kubernetes
scp /etc/kubernetes/admin.conf k8s-master03:/etc/kubernetes
To join the other master nodes to the cluster, add the control-plane flag to the join command
# --experimental-control-plane was the HA master join flag on releases before 1.15
kubeadm join vip:port --token **** --discovery-token-ca-cert-hash *** --experimental-control-plane
# on 1.17, joining an HA master looks like this (the port matches the haproxy bind / controlPlaneEndpoint, 58443):
kubeadm join 192.168.80.100:58443 --token **** \
    --discovery-token-ca-cert-hash **** \
    --control-plane
Tip: replace **** with the values printed at the end of kubeadm init on your own master node
Run the following commands on the master node; this example uses root, though a non-root user is recommended
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
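With the kubeconfig in place, the control plane can now be checked from the master; a quick verification:
kubectl get nodes # all joined masters should be listed (NotReady is normal until the network plugin in step 25 is installed)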
24. Join the worker nodes to the cluster
# worker nodes join the cluster as follows
kubeadm join 192.168.80.100:58443 --token **** \
    --discovery-token-ca-cert-hash ****
Tip: replace **** with the values printed at the end of kubeadm init on your own master node
25. Install the flannel network plugin
# download kube-flannel.yml from the flannel GitHub repo onto the primary master node that holds the VIP
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# recommended: pull the flannel image from the Alibaba Cloud mirror and retag it
docker pull registry.cn-hangzhou.aliyuncs.com/kube-iamges/flannel:v0.11.0-amd64
docker tag registry.cn-hangzhou.aliyuncs.com/kube-iamges/flannel:v0.11.0-amd64 quay.io/coreos/flannel:v0.11.0-amd64
# save the image with docker save -o
docker save -o flannel.tar quay.io/coreos/flannel:v0.11.0-amd64
# copy the saved image to the other nodes
scp -r flannel.tar k8s-master02:/root/
scp -r flannel.tar k8s-master03:/root/
scp -r flannel.tar k8s-node01:/root/
scp -r flannel.tar k8s-node02:/root/
# import the flannel image on each of the other nodes
docker load -i flannel.tar
# apply kube-flannel.yml on the primary master node that holds the HA VIP
kubectl apply -f kube-flannel.yml
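flannel runs as a DaemonSet, so one pod per node should come up, after which the nodes turn Ready; an optional check (the app=flannel label matches the upstream manifest):
kubectl get pods -n kube-system -l app=flannel -o wide
kubectl get nodes # all nodes should report Ready once flannel is running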

26. Test the high availability
# run the following on master03, then disconnect the network of master01
while true; do sleep 1; kubectl get node; date; done
# node information is briefly unavailable, after which the output returns to normal


