Kubernetes Cluster Installation
Environment:
k8s-Master-Centos8  IP: 192.168.152.53
k8s-Node1-Centos7   IP: 192.168.152.253
k8s-Node2-Centos8   IP: 192.168.152.252
Note:
The steps on the Master and the Node machines are largely identical.
Node machines only need to complete through step 6 of this guide.
1. Preliminary Preparation
# Stop the firewall
[root@Centos8 ~]# systemctl stop firewalld.service
# Set SELinux to permissive; to disable it permanently, edit /etc/selinux/config
[root@Centos8 ~]# setenforce 0
# Disable the swap partition, so Kubernetes is never installed into swap memory
[root@Centos8 ~]# swapoff -a
[root@Centos8 ~]# sed -i '/swap/ s/^/#/g' /etc/fstab
# Install iptables, set an empty rule set, and enable it at boot
yum -y install iptables-services iptables
systemctl enable --now iptables.service
iptables -F              # flush all rules
service iptables save    # save
iptables -L              # confirm the rule set is empty
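Before moving on, it is worth confirming each change actually took effect. A quick sanity check (a sketch; expected results shown as comments):

getenforce                              # expect Permissive (or Disabled)
free -m | grep -i swap                  # expect 0 total after swapoff
systemctl is-active firewalld.service   # expect inactive
iptables -L -n                          # expect empty chains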
2. Upgrade the Kernel (>= 4.4 recommended)
#—————————————— Centos8 ————————————————#
#—— On Centos8 it is best not to upgrade at all; if you must, upgrade only as far as 4.4 ——#
#—— The steps below merely demonstrate the process; do not actually go to 5.6, which apparently lacks nf_conntrack_ipv4 ——#
# Install the ELRepo repository
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install https://www.elrepo.org/elrepo-release-8.0-2.el8.elrepo.noarch.rpm
# List the kernel packages available
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
# Install the kernel
yum --enablerepo=elrepo-kernel install kernel-ml
# After installation, list all installed kernels
grubby --info=ALL
# Set the boot kernel
grubby --set-default /boot/vmlinuz-5.6.2-1.el8.elrepo.x86_64

#———————————————— Centos7 ————————————————#
# Download the kernel rpm package
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# Install the new kernel by package name
yum --enablerepo=elrepo-kernel install -y kernel-lt
# Set the default boot kernel
grub2-set-default 'CentOS Linux (4.4.218-1.el7.elrepo.x86_64) 7 (Core)'

#———————————————— Same from here on ————————————————#
# Reboot for the change to take effect
systemctl reboot
# Check the kernel version again
[root@Centos8 ~]# uname -r
5.6.2-1.el8.elrepo.x86_64
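Before rebooting, you can confirm which kernel will actually boot by default. A small check, one command per distro:

grubby --default-kernel    # Centos8: should print the vmlinuz path set above
grub2-editenv list         # Centos7: saved_entry should match the title set above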
3. Prerequisites for enabling IPVS in kube-proxy (usually satisfied by default on CentOS 7 and later)
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
# Check whether the ipvs modules loaded successfully
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4 20480 4
nf_defrag_ipv4 16384 1 nf_conntrack_ipv4
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 147456 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 114688 9 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c 16384 2 xfs,ip_vs
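Once the cluster has been initialized in step 7, you can confirm kube-proxy really runs in ipvs mode. A sketch, assuming the ipvsadm tool is installed (yum -y install ipvsadm):

ipvsadm -Ln    # should list virtual servers for the 10.96.0.0/12 service network
kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipvs    # logs mention the proxier in use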
4. Install Docker
# Install prerequisites
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the docker repository
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install docker
yum -y install docker-ce

Error:
package docker-ce-3:19.03.8-3.el7.x86_64 requires containerd.io >= 1.2.2-3, but none of the providers can be installed
First install containerd.io >= 1.2.2-3:
dnf install https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm
Then install docker again:
yum -y install docker-ce

# Start docker and enable it at boot
systemctl enable --now docker.service
# Configure the daemon: set the default cgroup driver to systemd, have docker log in json format,
# and switch the registry mirror to the Aliyun mirror
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "registry-mirrors": ["https://f1bhsuge.mirror.aliyuncs.com"]
}
EOF
# Create the drop-in configuration directory
mkdir -p /etc/systemd/system/docker.service.d
# Reload the daemon and restart docker
systemctl daemon-reload && systemctl restart docker.service
5. Install kubeadm (master and nodes)
# Configure the yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
# Install kubeadm, kubectl and kubelet
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
# Enable at boot, but do not start the service yet
systemctl enable kubelet.service
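A quick version check before continuing, to be sure yum pinned the release you asked for:

kubeadm version -o short    # expect v1.15.1
kubelet --version           # expect Kubernetes v1.15.1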
6. Pull the Initialization Images
(1) There are two approaches; the first imports the images with the script below.
vim initimage.sh
...
#!/usr/bin/env bash
K8S_VERSION=v1.15.1
ETCD_VERSION=3.3.10
DASHBOARD_VERSION=v1.8.3
FLANNEL_VERSION=v0.10.0-amd64
DNS_VERSION=1.3.1
PAUSE_VERSION=3.1

# Core components
docker pull mirrorgooglecontainers/kube-apiserver-amd64:$K8S_VERSION
docker pull mirrorgooglecontainers/kube-controller-manager-amd64:$K8S_VERSION
docker pull mirrorgooglecontainers/kube-scheduler-amd64:$K8S_VERSION
docker pull mirrorgooglecontainers/kube-proxy-amd64:$K8S_VERSION
docker pull mirrorgooglecontainers/etcd-amd64:$ETCD_VERSION
docker pull mirrorgooglecontainers/pause:$PAUSE_VERSION
docker pull coredns/coredns:$DNS_VERSION

# Retag to the names kubeadm expects
docker tag mirrorgooglecontainers/kube-apiserver-amd64:$K8S_VERSION k8s.gcr.io/kube-apiserver:$K8S_VERSION
docker tag mirrorgooglecontainers/kube-controller-manager-amd64:$K8S_VERSION k8s.gcr.io/kube-controller-manager:$K8S_VERSION
docker tag mirrorgooglecontainers/kube-scheduler-amd64:$K8S_VERSION k8s.gcr.io/kube-scheduler:$K8S_VERSION
docker tag mirrorgooglecontainers/kube-proxy-amd64:$K8S_VERSION k8s.gcr.io/kube-proxy:$K8S_VERSION
docker tag mirrorgooglecontainers/etcd-amd64:$ETCD_VERSION k8s.gcr.io/etcd:$ETCD_VERSION
docker tag mirrorgooglecontainers/pause:$PAUSE_VERSION k8s.gcr.io/pause:$PAUSE_VERSION
docker tag coredns/coredns:$DNS_VERSION k8s.gcr.io/coredns:$DNS_VERSION

# Remove the redundant images
docker rmi mirrorgooglecontainers/kube-apiserver-amd64:$K8S_VERSION
docker rmi mirrorgooglecontainers/kube-controller-manager-amd64:$K8S_VERSION
docker rmi mirrorgooglecontainers/kube-scheduler-amd64:$K8S_VERSION
docker rmi mirrorgooglecontainers/kube-proxy-amd64:$K8S_VERSION
docker rmi mirrorgooglecontainers/etcd-amd64:$ETCD_VERSION
docker rmi mirrorgooglecontainers/pause:$PAUSE_VERSION
docker rmi coredns/coredns:$DNS_VERSION
...

chmod +x initimage.sh
# Run the script to start importing the images...
./initimage.sh

[root@Centos8 ]# docker images
REPOSITORY                           TAG             IMAGE ID       CREATED         SIZE
quay.io/coreos/flannel               v0.12.0-amd64   4e9f801d2217   4 weeks ago     52.8MB
k8s.gcr.io/kube-scheduler            v1.15.1         b0b3c4c404da   8 months ago    81.1MB
k8s.gcr.io/kube-controller-manager   v1.15.1         d75082f1d121   8 months ago    159MB
k8s.gcr.io/kube-proxy                v1.15.1         89a062da739d   8 months ago    82.4MB
k8s.gcr.io/kube-apiserver            v1.15.1         68c3eb07bfc3   8 months ago    207MB
k8s.gcr.io/coredns                   1.3.1           eb516548c180   15 months ago   40.3MB
k8s.gcr.io/etcd                      3.3.10          2c4adeb21b4f   16 months ago   258MB
k8s.gcr.io/pause                     3.1             da86e6ba6ca1   2 years ago     742kB
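To cross-check that the script covered everything kubeadm needs for this release, you can compare the retagged names against kubeadm's own list (the versions should line up with the tags above):

kubeadm config images list --kubernetes-version v1.15.1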
(2) The second approach is to pull the images after the k8s config file has been generated, i.e. after step 7 below.
7. Initialize the Master Node
# Dump the default k8s init configuration into kubeadm-conf.yaml
kubeadm config print init-defaults > kubeadm-conf.yaml
# Edit kubeadm-conf.yaml
vim kubeadm-conf.yaml
...
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.152.53   # change this to your real IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: centos8
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers   # mirror change; optional
kind: ClusterConfiguration
kubernetesVersion: v1.15.1   # make the version match
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"   # added: the default pod network segment for flannel
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# The following is new configuration that switches the default proxy mode to ipvs
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
...
# Pre-pull the images required by the config file; this corresponds to option (2) of step 6 above
kubeadm config images pull --config kubeadm-conf.yaml
# Start the initialization
kubeadm init --config=kubeadm-conf.yaml --experimental-upload-certs | tee kubeadm-init.log

Error:
[ERROR NumCPU]: the number of available CPUs 1 is less than the required 2
Plainly, the CPU count is below the required 2; give the VM more CPUs and it is fine.

Output on success:
To start using your cluster, you need to run the following as a regular user:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubeadm join 192.168.152.53:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:50ca5375950abfa05cd4bd37dfb60e9ccd078083aeca49fa8bb6275c13d2a2cd

# Create the files and directory from the output above.
# They cache the credentials kubectl uses when talking to the api server over https.
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# List the current nodes; the status is NotReady because no network has been added yet
[root@Centos8 .kube]# kubectl get node
NAME      STATUS     ROLES    AGE    VERSION
centos8   NotReady   master   4m1s   v1.15.1
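Beyond kubectl get node, a couple of health checks right after init can save debugging later (a sketch; both commands ship with kubectl):

kubectl get cs         # scheduler, controller-manager and etcd-0 should all show Healthy
kubectl cluster-info   # prints the api server and CoreDNS endpoints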
8. Add the Flannel Network
(1) First approach
mkdir -p install-k8s/plugin/flannel
mkdir -p install-k8s/core
cd install-k8s/core
mv /etc/kubernetes/kubeadm-init.log /etc/kubernetes/kubeadm-conf.yaml ./
cd ../plugin/flannel
# Download flannel's manifest
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Apply the yaml file
kubectl create -f kube-flannel.yml
# Once built, check the pods in the kube-system namespace (-n selects the namespace)
[root@Centos8 core]# kubectl get pod -n kube-system
NAME                              READY   STATUS             RESTARTS   AGE
coredns-5c98db65d4-5gwmj          0/1     CrashLoopBackOff   22         91m
coredns-5c98db65d4-c277w          0/1     CrashLoopBackOff   22         91m
etcd-centos8                      1/1     Running            0          90m
kube-apiserver-centos8            1/1     Running            0          90m
kube-controller-manager-centos8   1/1     Running            0          90m
kube-flannel-ds-amd64-ggghn       1/1     Running            0          8m45s
kube-proxy-gslw2                  1/1     Running            0          91m
kube-scheduler-centos8            1/1     Running            0          90m

# After the build, a flannel interface appears in the NIC list
[root@Centos8 core]# ifconfig
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.244.0.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::4019:beff:fe7c:5582  prefixlen 64  scopeid 0x20<link>
        ether 42:19:be:7c:55:82  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 13  overruns 0  carrier 0  collisions 0

# With the network loaded, the node status changes to Ready
[root@Centos8 ~]# kubectl get node
NAME      STATUS   ROLES    AGE   VERSION
centos8   Ready    master   53d   v1.15.1
(2) Second approach, for when kubectl create -f kube-flannel.yml fails to pull the image, or fails because of network problems
# Edit the host's hosts file
echo "199.232.28.133 raw.githubusercontent.com" >> /etc/hosts
# Then download the flannel manifest
curl -o kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Change the image registry: replace every quay.io in the yaml with quay-mirror.qiniu.com
sed -i 's/quay.io/quay-mirror.qiniu.com/g' kube-flannel.yml
# Finally run the create command
kubectl apply -f kube-flannel.yml
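Either way, you can watch the flannel DaemonSet roll out before re-checking node status (the app=flannel label matches the upstream kube-flannel.yml at the time of writing):

kubectl -n kube-system get pods -l app=flannel -w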
9. Join the Worker Nodes to the Master
# Simply run the last line echoed in kubeadm-init.log on each node; shown here on kube-node2
[root@kube-node2 ~]# kubeadm join 192.168.152.53:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:c291b4fc646b5925299f8cdf7fafe33ad9c0505a1609041d8c8214d104eb08da
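Back on the master, verify the nodes registered. The ROLES column shows <none> for workers unless you label them; the label command below is optional and the hostname is this guide's:

[root@Centos8 ~]# kubectl get node -o wide
kubectl label node kube-node2 node-role.kubernetes.io/node=    # purely cosmetic role label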
Problems encountered and service startup errors:
1. W0412 04:57:19.803140 846 watcher.go:87] Error while processing event ("/sys/fs/cgroup/devices/libcontainer_30695_systemd_test_default.slice": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/devices/libcontainer_30695_systemd_test_default.slice: no such file or directory
## The cause is that docker and the kubelet are using different cgroup drivers
## Locate the kubeadm configuration files
[root@Centos8 ~]# rpm -ql kubeadm
/usr/bin/kubeadm
/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
## Edit the config file and switch the kubelet's cgroup driver to systemd
vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
...
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml --cgroup-driver=systemd"
...
## Reload the configuration and restart the service
[root@Centos8 ~]# systemctl daemon-reload
[root@Centos8 ~]# systemctl restart kubelet.service
2. flannel on the node2 node keeps erroring:
## Root cause not found yet; still debugging. If any expert passes by, pointers are welcome.
kube-flannel-ds-amd64-b47l9 0/1 Init:ErrImagePull 0 3m50s
3. To reinstall the master node, run:
# kubeadm reset
and then perform the installation again.
4. To reinstall a node, run:
# kubectl drain <node name> --delete-local-data --force --ignore-daemonsets
# kubectl delete node <node name>   or run kubeadm reset on the node   # then simply join again
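Note that kubeadm reset deliberately leaves iptables/ipvs rules and CNI configuration behind; its own output tells you to clean them up by hand. A cleanup sketch to run on the node after the reset:

iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear                      # only if ipvs mode was enabled
rm -rf /etc/cni/net.d $HOME/.kube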
5. A node fails to join the master with: error execution phase preflight: couldn't validate the identity of the API Server: abort connecting to API servers after timeout of 5m0s
Cause: the master's token has expired; a new one must be created.
Fix:
Master:
[root@Centos8 ~]# kubeadm token create
blopur.fn8gtr06gsjlq7yi

Node:
kubeadm join 192.168.152.53:6443 --token blopur.fn8gtr06gsjlq7yi \
    --discovery-token-ca-cert-hash sha256:c291b4fc646b5925299f8cdf7fafe33ad9c0505a1609041d8c8214d104eb08da
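kubeadm token create also accepts --print-join-command, which prints the complete join command with a fresh token and the CA cert hash in one go, so nothing has to be fished out of the init log:

[root@Centos8 ~]# kubeadm token create --print-join-command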
6. After node2 joins the master, systemctl status kubelet.service reports: Unable to update cni config: No networks found in /etc/cni/net.d
vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
...
Add:
Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/ --cni-bin-dir=/opt/cni/bin"
...
systemctl daemon-reload && systemctl restart kubelet.service
7. After node2 joins the master, systemctl status kubelet.service reports: open /run/flannel/subnet.env: no such file or directory
## Copy /run/flannel/subnet.env from the master node over to the node
[root@Centos8 flannel]# scp subnet.env kubenode2:/run/flannel/
## restart
systemctl restart kubelet.service
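For reference, subnet.env is just a small env-style fragment along these lines (the exact values depend on your pod CIDR; these match the 10.244.0.0/16 used above):

FLANNEL_NETWORK=10.244.0.0/16
FLANNEL_SUBNET=10.244.0.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true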
Addendum:
To make future deployment and scaling easier, I use Ansible to install docker and k8s on the nodes automatically.
The ansible control host is the k8s MASTER: 192.168.152.53
The managed hosts are whichever node machines are needed.
#### Note: all operations and files live on 192.168.152.53 ####
[root@Centos8 ansible]# pwd
/root/ansible

*** First write the image export/import scripts ***
[root@Centos8 ansible]# cat saveImages.sh    # export script
#!/usr/bin/env bash
# Save the seven k8s images as numbered tarballs and copy them to both nodes.
# Numbered filenames are used because the image names contain slashes.
IMAGESNAME=(`docker images | awk '/ago/{print $1}'`)
IMAGESTAG=(`docker images | awk '/ago/{print $2}'`)
IPADDR1='192.168.152.253'
IPADDR2='192.168.152.252'
mkdir -p /root/images
for i in `seq 0 6`;do
    docker save > /root/images/${i}.tar.gz ${IMAGESNAME[$i]}:${IMAGESTAG[$i]}
    scp /root/images/${i}.tar.gz ${IPADDR1}:/root/
    scp /root/images/${i}.tar.gz ${IPADDR2}:/root/
done

[root@Centos8 ansible]# cat loadImages.sh    # import script
#!/usr/bin/env bash
# Load each tarball into docker, then delete it
for i in `seq 0 6`;do
    docker load < /root/${i}.tar.gz && rm -f /root/${i}.tar.gz
done

*** Run saveImages.sh locally ***
./saveImages.sh

*** Create the playbook ***
[root@Centos8 ansible]# vim kuber.yaml
...
---
- name: Install docker and k8s
  hosts: all
  tasks:
    - block:
        - name: Add repository
          shell: yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
        - name: Install docker and its dependencies
          yum:
            name:
              - yum-utils
              - device-mapper-persistent-data
              - lvm2
              - https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm
              - docker-ce
            state: latest
        - name: mkdir docker.service.d
          file:
            path: '{{ item }}'
            state: directory
          loop:
            - /etc/docker
            - /etc/systemd/system/docker.service.d
        - name: Copy daemon.json
          copy:
            src: /etc/docker/daemon.json
            dest: /etc/docker/daemon.json
        - name: daemon-reload
          shell: systemctl daemon-reload
        - name: Start docker
          service:
            name: docker
            state: restarted
            enabled: yes
      rescue:
        - debug:
            msg: 'docker Installation failed!'
    - name: Copy using inline content
      copy:
        content: |
          [kubernetes]
          name=Kubernetes
          baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
          enabled=1
          gpgcheck=0
          repo_gpgcheck=0
          gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        dest: /etc/yum.repos.d/kubernetes.repo
    - name: Install k8s
      yum:
        name:
          - kubeadm-1.15.1
          - kubectl-1.15.1
          - kubelet-1.15.1
        state: present
    - name: Start kubelet
      service:
        name: kubelet
        state: started
        enabled: yes
    - name: Run script load Images
      script: /root/ansible/loadImages.sh
...

# Run the playbook; once it completes, finish by repeating step 8 above on the nodes
ansible-playbook kuber.yaml
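If the play fails to reach the nodes, check the inventory and connectivity first. A minimal sketch (the group name and the default inventory path are assumptions; any inventory layout works):

cat > /etc/ansible/hosts <<EOF
[k8s-nodes]
192.168.152.253
192.168.152.252
EOF
ansible k8s-nodes -m ping                      # confirm ssh connectivity first
ansible-playbook kuber.yaml --syntax-check     # then validate the playbook before running it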