For reference, another user's write-up of building a k8s high-availability cluster (1.17.3) on Alibaba Cloud:
https://www.cnblogs.com/gmmy/p/12372805.html
Prepare four CentOS 7 virtual machines for the k8s cluster:
master01 (192.168.1.202): 2 CPU cores, 2 GB RAM, 60 GB disk, bridged network
master02 (192.168.1.203): 2 CPU cores, 2 GB RAM, 60 GB disk, bridged network
master03 (192.168.1.204): 2 CPU cores, 2 GB RAM, 60 GB disk, bridged network
node01 (192.168.1.205): 2 CPU cores, 1 GB RAM, 60 GB disk, bridged network
Base components to install on all master and node nodes
#This walkthrough is not one-shot: read it line by line and run each block on the server it names
#Most of it can be pasted straight into a shell; a little basic Linux knowledge is assumed
#Substitute your own hostnames (master01, master02, master03, node01) as needed
#On master01
hostnamectl set-hostname master01
#On master02
hostnamectl set-hostname master02
#On master03
hostnamectl set-hostname master03
#On node01
hostnamectl set-hostname node01
#On master01, master02, master03 and node01, append the following lines to /etc/hosts:
cat >> /etc/hosts << EOF
192.168.1.202 master01
192.168.1.203 master02
192.168.1.204 master03
192.168.1.205 node01
EOF
#Generate the key pair for passwordless SSH logins
#By default everything lives under /root/.ssh/
#Run the following on all master nodes
mkdir -p /root/.ssh/
chmod 700 /root/.ssh/
touch /root/.ssh/authorized_keys
chmod 600 /root/.ssh/authorized_keys
ssh-keygen -t rsa #press Enter three times to accept the defaults
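#The key pair alone is not enough: each node's public key must end up in the other nodes' authorized_keys. A minimal sketch using ssh-copy-id (assumes the hostnames above resolve via /etc/hosts; you are prompted once per target for its root password):
for host in master01 master02 master03 node01; do
  ssh-copy-id -i /root/.ssh/id_rsa.pub root@${host}
done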
#Install the required packages with yum
yum -y install wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake \
libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel \
vim ncurses-devel autoconf automake zlib-devel python-devel \
epel-release openssh-server socat ipvsadm conntrack bind-utils libffi-devel \
device-mapper-persistent-data lvm2 yum-utils
#Turn off the firewall
systemctl stop firewalld && systemctl disable firewalld
yum install iptables-services -y
iptables -F && service iptables save
service iptables stop && systemctl disable iptables
#Switch the time zone and schedule ntp time sync
mv -f /etc/localtime /etc/localtime.bak
/bin/cp -rf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'ZONE="Asia/Shanghai"' > /etc/sysconfig/clock
ntpdate cn.pool.ntp.org
echo "0 */1 * * * root /usr/sbin/ntpdate cn.pool.ntp.org" >> /etc/crontab
service crond restart
#Disable selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
setenforce 0
#Raise the maximum file descriptor limit
grep -q ulimit /etc/profile || echo "ulimit -n 65536" >> /etc/profile
grep -q "root soft nofile 65536" /etc/security/limits.conf || echo "root soft nofile 65536" >> /etc/security/limits.conf
grep -q "root hard nofile 65536" /etc/security/limits.conf || echo "root hard nofile 65536" >> /etc/security/limits.conf
grep -qF "* soft nofile 65536" /etc/security/limits.conf || echo "* soft nofile 65536" >> /etc/security/limits.conf
grep -qF "* hard nofile 65536" /etc/security/limits.conf || echo "* hard nofile 65536" >> /etc/security/limits.conf
#Turn off swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
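#Optional sanity check: swap should be off now and stay off after a reboot
free -h | grep -i swap    #the Swap line should show 0B in use
grep swap /etc/fstab      #every swap entry should now start with '#'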
#Switch the yum repos to the Alibaba Cloud mirror
mv -f /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
#Add the yum repo needed to install k8s
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF
#Add the docker yum repo
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#Clean and rebuild the yum cache
yum clean all
yum makecache fast
#Install docker 19.03.7
yum install -y docker-ce-19.03.7-3.el7
systemctl enable docker && systemctl start docker
systemctl status docker
#Adjust the docker daemon configuration
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
systemctl daemon-reload && systemctl restart docker
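#Optional check: docker must report the systemd cgroup driver (it has to match the kubelet's)
docker info 2>/dev/null | grep -i 'cgroup driver'    #expected output: Cgroup Driver: systemd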
#Enable the kernel settings k8s needs for bridged network traffic
#Make bridged packets traverse iptables, and persist the settings
echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
cat >> /etc/sysctl.conf << EOF
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl -p
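#Note: sysctl -p reloads only /etc/sysctl.conf; sysctl --system also applies /etc/sysctl.d/k8s.conf. A quick verification:
sysctl --system > /dev/null
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward    #both should print 1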
#Enable ipvs: without it kube-proxy falls back to iptables mode, which scales poorly, so the upstream docs recommend loading the ipvs kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep ip_vs
#Install kubeadm, kubelet and kubectl on master01, master02, master03 and node01
yum install kubeadm-1.18.2 kubelet-1.18.2 kubectl-1.18.2 -y && systemctl enable kubelet && systemctl start kubelet
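#Note: until kubeadm init/join has run, the kubelet restarts in a loop because its config does not exist yet; that is expected at this stage
systemctl status kubelet --no-pager | head -n 5    #"activating (auto-restart)" is normal here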
Installing and configuring the master nodes
#Deploy keepalived+lvs on master01, master02 and master03 to make the apiserver highly available
yum install -y socat keepalived ipvsadm conntrack
#Set up /etc/keepalived/keepalived.conf on master01
#The master01 config keeps priority 100; substitute your own master IPs and desired virtual IP in the sed lines below
#The virtual IP used throughout this walkthrough is 192.168.1.199
wget -O /etc/keepalived/keepalived.conf http://download.zhufunin.com/k8s_1.18/keepalived.conf
sed -i 's/master01/192.168.1.202/g' /etc/keepalived/keepalived.conf
sed -i 's/master02/192.168.1.203/g' /etc/keepalived/keepalived.conf
sed -i 's/master03/192.168.1.204/g' /etc/keepalived/keepalived.conf
sed -i 's/VIP_addr/192.168.1.199/g' /etc/keepalived/keepalived.conf
#On master02: change the priority to 50 (the sed below rewrites "priority 100"); substitute your own master IPs and virtual IP
#The virtual IP here is again 192.168.1.199
wget -O /etc/keepalived/keepalived.conf http://download.zhufunin.com/k8s_1.18/keepalived.conf
sed -i 's/priority 100/priority 50/g' /etc/keepalived/keepalived.conf
sed -i 's/master01/192.168.1.202/g' /etc/keepalived/keepalived.conf
sed -i 's/master02/192.168.1.203/g' /etc/keepalived/keepalived.conf
sed -i 's/master03/192.168.1.204/g' /etc/keepalived/keepalived.conf
sed -i 's/VIP_addr/192.168.1.199/g' /etc/keepalived/keepalived.conf
#On master03: change the priority to 30; substitute your own master IPs and virtual IP
#The virtual IP here is again 192.168.1.199
wget -O /etc/keepalived/keepalived.conf http://download.zhufunin.com/k8s_1.18/keepalived.conf
sed -i 's/priority 100/priority 30/g' /etc/keepalived/keepalived.conf
sed -i 's/master01/192.168.1.202/g' /etc/keepalived/keepalived.conf
sed -i 's/master02/192.168.1.203/g' /etc/keepalived/keepalived.conf
sed -i 's/master03/192.168.1.204/g' /etc/keepalived/keepalived.conf
sed -i 's/VIP_addr/192.168.1.199/g' /etc/keepalived/keepalived.conf
#If your primary NIC is not eth0 (for example ens33), point the config at it:
#sed -i 's/eth0/ens33/g' /etc/keepalived/keepalived.conf
#The downloaded keepalived.conf puts every node in BACKUP state with nopreempt (non-preemptive mode).
#Suppose master01 goes down: after it boots back up, the VIP does not automatically float back to it.
#That keeps the cluster healthy, because right after boot the apiserver and the other control-plane components are not yet running; if the VIP jumped straight back to master01, the whole cluster would go down. That is why non-preemptive mode is used.
#The priority values order the nodes master01 -> master02 -> master03.
#Run the following on master01, master02 and master03 in turn
systemctl enable keepalived && systemctl start keepalived && systemctl status keepalived
#Once keepalived is up, ip addr on master01 shows the VIP 192.168.1.199 (this walkthrough's virtual IP) bound to the NIC
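#For example (adjust the NIC name and VIP to your environment):
ip addr show eth0 | grep 192.168.1.199 && echo "VIP is bound on this node"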
#Run the following on master01
cd /usr/local/src
wget -O /usr/local/src/kubeadm-config.yaml http://download.zhufunin.com/k8s_1.18/kubeadm-config.yaml
#This file drives the master initialization; the sed lines below substitute the real IP addresses for the node names, so adjust them to your environment
sed -i 's/master01/192.168.1.202/g' kubeadm-config.yaml
sed -i 's/master02/192.168.1.203/g' kubeadm-config.yaml
sed -i 's/master03/192.168.1.204/g' kubeadm-config.yaml
sed -i 's/VIP_addr/192.168.1.199/g' kubeadm-config.yaml
#Initialize master01
kubeadm init --config kubeadm-config.yaml
kubeadm config images list
#10.244.0.0/16 is the flannel plugin's default pod network CIDR; it comes up again later
#If init fails pulling images: adding one line, "imageRepository: registry.aliyuncs.com/google_containers", to kubeadm-config.yaml makes kubeadm pull from the Alibaba Cloud mirror, which is directly reachable. That route is simpler, but it is only described here for reference; this walkthrough does not use it, since it causes problems later when manually adding nodes to the cluster. A commented sketch follows.
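#A minimal sketch of what that alternative config could look like (kubeadm v1beta2 API; the addresses are this walkthrough's; kept commented out because this approach is not used here):
#apiVersion: kubeadm.k8s.io/v1beta2
#kind: ClusterConfiguration
#kubernetesVersion: v1.18.2
#controlPlaneEndpoint: "192.168.1.199:6443"
#imageRepository: registry.aliyuncs.com/google_containers
#networking:
#  podSubnet: 10.244.0.0/16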
#The images this walkthrough needs are downloaded and loaded below
#If something went wrong, reset and start over:
kubeadm reset
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /var/lib/kubelet/
rm -rf /var/lib/etcd
rm -rf /var/lib/dockershim
rm -rf /var/run/kubernetes
rm -rf /var/lib/cni
rm -rf /etc/cni/net.d
#Now pull the images down to the local machine by hand
wget http://download.zhufunin.com/k8s_1.18/1-18-kube-apiserver.tar.gz
wget http://download.zhufunin.com/k8s_1.18/1-18-kube-scheduler.tar.gz
wget http://download.zhufunin.com/k8s_1.18/1-18-kube-controller-manager.tar.gz
wget http://download.zhufunin.com/k8s_1.18/1-18-pause.tar.gz
wget http://download.zhufunin.com/k8s_1.18/1-18-cordns.tar.gz
wget http://download.zhufunin.com/k8s_1.18/1-18-etcd.tar.gz
wget http://download.zhufunin.com/k8s_1.18/1-18-kube-proxy.tar.gz
docker load -i 1-18-kube-apiserver.tar.gz
docker load -i 1-18-kube-scheduler.tar.gz
docker load -i 1-18-kube-controller-manager.tar.gz
docker load -i 1-18-pause.tar.gz
docker load -i 1-18-cordns.tar.gz
docker load -i 1-18-etcd.tar.gz
docker load -i 1-18-kube-proxy.tar.gz
echo """
說明:
pause版本是3.2,用到的鏡像是k8s.gcr.io/pause:3.2
etcd版本是3.4.3,用到的鏡像是k8s.gcr.io/etcd:3.4.3-0
cordns版本是1.6.7,用到的鏡像是k8s.gcr.io/coredns:1.6.7
apiserver、scheduler、controller-manager、kube-proxy版本是1.18.2,用到的鏡像分別是
k8s.gcr.io/kube-apiserver:v1.18.2
k8s.gcr.io/kube-controller-manager:v1.18.2
k8s.gcr.io/kube-scheduler:v1.18.2
k8s.gcr.io/kube-proxy:v1.18.2
如果機器很多,我們只需要把這些鏡像傳到我們的內部私有鏡像倉庫即可,這樣我們在kubeadm初始化kubernetes時可以通過"--image-repository=私有鏡像倉庫地址"的方式進行鏡像拉取,這樣不需要手動傳到鏡像到每個機器
"""
#After this step you are nearly done
#A successful init prints instructions like the following; do what they say:
#mkdir -p $HOME/.kube
#sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
#sudo chown $(id -u):$(id -g) $HOME/.kube/config
#It also prints a "kubeadm join ..." command: note it down, because joining master02, master03 and node01 to the cluster means running that command on those nodes. The token differs on every run, so save your own output; it is needed below
#Check node status
kubectl get nodes
#Copy the certificates on master01 to master02 and master03
#On master02 and master03, create the directories the certificates go into
cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
#On master01, copy the certificates over; the scp commands below are best pasted one line at a time to avoid mistakes (a loop version follows them)
scp /etc/kubernetes/pki/ca.crt master02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.key master02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.key master02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub master02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.crt master02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.key master02:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.crt master02:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.key master02:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/ca.crt master03:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.key master03:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.key master03:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub master03:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.crt master03:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.key master03:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.crt master03:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.key master03:/etc/kubernetes/pki/etcd/
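#Equivalent loop form of the copies above (relies on the passwordless SSH set up earlier):
for host in master02 master03; do
  scp /etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} ${host}:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/etcd/{ca.crt,ca.key} ${host}:/etc/kubernetes/pki/etcd/
done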
#Once the certificates are in place, run your own saved join command on master02 and master03 to add them to the cluster
#It looks like: kubeadm join 192.168.1.199:6443 --token 7dwluq.x6nypje7h55rnrhl \
#  --discovery-token-ca-cert-hash sha256:fa75619ab0bb6273126350a9dbda9aa6c89828c2c4650299fe1647ab510a7e6c --control-plane
#--control-plane marks the joining node as a master
#Then on master02 and master03:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes
#Deploy calico.yaml on master01; master01 is the active control-plane node, master02 and master03 are standbys
wget http://download.zhufunin.com/k8s_1.18/calico.yaml #(original source: https://raw.githubusercontent.com/luckylucky421/kubernetes1.17.3/master/calico.yaml)
kubectl apply -f calico.yaml
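#Watch the calico and coredns pods until they all reach Running:
kubectl get pods -n kube-system -o wide | grep -E 'calico|coredns'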
Join node01 to the k8s cluster; run these steps on node01.
Make sure everything in the "base components to install on all master and node nodes" section above has been completed,
in particular: yum install kubeadm-1.18.2 kubelet-1.18.2 kubectl-1.18.2 -y && systemctl enable kubelet && systemctl start kubelet
Then run your own saved join command, which looks like: kubeadm join 192.168.1.199:6443 --token 7dwluq.x6nypje7h55rnrhl \
--discovery-token-ca-cert-hash sha256:fa75619ab0bb6273126350a9dbda9aa6c89828c2c4650299fe1647ab510a7e6c
If kubeadm reports an error, append -v 6 to get more verbose output.
If you lose the kubeadm join parameters, regenerate them on a master node with:
kubeadm token create --print-join-command
Check the cluster's node status from master01:
kubectl get nodes
The output looks like:
NAME       STATUS   ROLES    AGE     VERSION
master01   Ready    master   3m36s   v1.18.2
master02   Ready    master   3m36s   v1.18.2
master03   Ready    master   3m36s   v1.18.2
node01     Ready    <none>   3m36s   v1.18.2
Generic node initialization
#Set the hostname, and add it to the /etc/hosts file on the masters
hostnamectl set-hostname xxxx
#Install the required packages with yum
yum -y install wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake \
libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel \
vim ncurses-devel autoconf automake zlib-devel python-devel \
epel-release openssh-server socat ipvsadm conntrack bind-utils libffi-devel \
device-mapper-persistent-data lvm2 yum-utils
#Turn off the firewall
systemctl stop firewalld && systemctl disable firewalld
yum install iptables-services -y
iptables -F && service iptables save
service iptables stop && systemctl disable iptables
#Switch the time zone and schedule ntp time sync
mv -f /etc/localtime /etc/localtime.bak
/bin/cp -rf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'ZONE="Asia/Shanghai"' > /etc/sysconfig/clock
ntpdate cn.pool.ntp.org
echo "0 */1 * * * root /usr/sbin/ntpdate cn.pool.ntp.org" >> /etc/crontab
service crond restart
#Disable selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
setenforce 0
#Raise the maximum file descriptor limit
grep -q ulimit /etc/profile || echo "ulimit -n 65536" >> /etc/profile
grep -q "root soft nofile 65536" /etc/security/limits.conf || echo "root soft nofile 65536" >> /etc/security/limits.conf
grep -q "root hard nofile 65536" /etc/security/limits.conf || echo "root hard nofile 65536" >> /etc/security/limits.conf
grep -qF "* soft nofile 65536" /etc/security/limits.conf || echo "* soft nofile 65536" >> /etc/security/limits.conf
grep -qF "* hard nofile 65536" /etc/security/limits.conf || echo "* hard nofile 65536" >> /etc/security/limits.conf
#Turn off swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
#Optionally switch the yum repos to the Alibaba Cloud mirror (left commented out; not required)
#mv -f /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
#wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
#Add the yum repo needed to install k8s
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF
#Add the docker yum repo; if the official one is slow, use the commented-out Alibaba Cloud mirror below instead
#yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
#Clean and rebuild the yum cache
yum clean all
yum makecache fast
#Install docker 19.03.7
yum install -y docker-ce-19.03.7-3.el7
systemctl enable docker && systemctl start docker
systemctl status docker
#Adjust the docker daemon configuration
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
systemctl daemon-reload && systemctl restart docker
#Enable the kernel settings k8s needs for bridged network traffic
#Make bridged packets traverse iptables, and persist the settings
echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
cat >> /etc/sysctl.conf << EOF
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl -p
#Install kubeadm, kubelet and kubectl
yum install kubeadm-1.18.2 kubelet-1.18.2 kubectl-1.18.2 -y && systemctl enable kubelet && systemctl start kubelet
#Then run your own saved join command, which looks like: kubeadm join 192.168.1.199:6443 --token 7dwluq.x6nypje7h55rnrhl \
#  --discovery-token-ca-cert-hash sha256:fa75619ab0bb6273126350a9dbda9aa6c89828c2c4650299fe1647ab510a7e6c
#Enable ipvs: without it kube-proxy falls back to iptables mode, which scales poorly, so the upstream docs recommend loading the ipvs kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep ip_vs
If you lose the kubeadm join parameters, regenerate them on a master node with:
kubeadm token create --print-join-command