The Complete Process of Setting Up a Kubernetes Cluster


Resource Preparation and Server Initialization

Run the following script on all servers to initialize the base configuration:

#!/bin/bash
cd `dirname $0`
 
# Disable SELinux
setenforce 0
sed -i '/SELINUX/s/enforcing/disabled/g' /etc/selinux/config
# Disable NetworkManager
systemctl stop NetworkManager
systemctl disable NetworkManager
# Adjust the log level
#sed -i 's/\#LogLevel=info/LogLevel=notice/g' /etc/systemd/system.conf
#systemctl daemon-reexec
# Configure SSH
sed -i -e "/GSSAPIAuthentication/s/yes/no/g" -e "/GSSAPICleanupCredentials/s/yes/no/g" -e"s/^#UseDNS\ no/UseDNS\ no/" -e"s/^#UseDNS\ yes/UseDNS\ no/" /etc/ssh/sshd_config
echo -e "ClientAliveInterval 60\nClientAliveCountMax 10" >> /etc/ssh/sshd_config
systemctl restart sshd
 
cat >> /etc/ssh/ssh_config <<'EOF'
Host *
   StrictHostKeyChecking no
   UserKnownHostsFile=/dev/null
EOF
 
# Aliyun base repo
rm -f /etc/yum.repos.d/CentOS-Base.repo
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# Aliyun EPEL repo
rm -f /etc/yum.repos.d/epel.repo
rm -f /etc/yum.repos.d/epel-testing.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
 
# Install packages
yum clean all
yum update -y
yum install -y iptables-services vim wget net-tools iperf3 telnet lvm2 tree screen tmux rsync lrzsz zip unzip xz ntpdate zlib sysstat hdparm htop iotop iftop dstat nmap mtr mlocate bind-utils ipmitool pciutils parted acpid man bash-completion* lsof bc ncdu
 
systemctl start acpid && systemctl enable acpid
systemctl stop firewalld && systemctl disable firewalld
# iptables setup
iptables -F
service iptables save
systemctl enable iptables
 
chmod +x /etc/rc.d/rc.local
 
# custom
cat >> /etc/bashrc <<'EOF'
export PATH=/usr/local/bin:$PATH
export TERM=xterm-256color
export EDITOR=/usr/bin/vim
# man page
export LESS_TERMCAP_mb=$(printf '\e[01;31m')
export LESS_TERMCAP_md=$(printf '\e[01;35m')
export LESS_TERMCAP_me=$(printf '\e[0m')
export LESS_TERMCAP_se=$(printf '\e[0m')
export LESS_TERMCAP_so=$(printf '\e[1;31;46m')
export LESS_TERMCAP_ue=$(printf '\e[0m')
export LESS_TERMCAP_us=$(printf '\e[04;36m')
# history tune
shopt -s histappend
export HISTTIMEFORMAT="%F %T "
export HISTCONTROL=ignoredups:erasedups
export HISTSIZE=100000
export HISTFILESIZE=100000
export PROMPT_COMMAND="history -a"
# PS1
export PS1="\[\e[36m\]\u\[\e[m\]\[\e[37m\]@\[\033[01;32m\]\h\[\033[00m\]:\[\033[01;34m\]\W\[\033[00m\]\\$ "
EOF
# Suppress systemd session/slice noise in the logs
echo 'if $programname == "systemd" and ($msg contains "Starting Session" or $msg contains "Started Session" or $msg contains "Created slice" or $msg contains "Starting user-" or $msg contains "Starting User Slice of" or $msg contains "Removed session" or $msg contains "Removed slice User Slice of" or $msg contains "Stopping User Slice of") then stop' > /etc/rsyslog.d/ignore-systemd-session-slice.conf
systemctl restart rsyslog.service
# vim 
cat >> /etc/vimrc <<'EOF'
set ts=4
set paste
set encoding=utf-8
set nocompatible
syntax on
EOF
 
# Disable IPv6
cat >> /etc/sysctl.d/99-sysctl.conf <<'EOF'
# disable ipv6
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
EOF
 
# Kernel and network tuning
cat >> /etc/sysctl.conf <<'EOF'
sunrpc.tcp_slot_table_entries = 128
net.core.rmem_default = 4194304
net.core.wmem_default = 4194304
net.core.rmem_max = 4194304
net.core.wmem_max = 4194304
net.ipv4.tcp_rmem = 4096 1048576 4194304
net.ipv4.tcp_wmem = 4096 1048576 4194304
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_syncookies = 1
net.core.netdev_max_backlog = 300000
vm.swappiness = 0
EOF
 
sysctl -p
 
# ulimit settings
cat > /etc/security/limits.d/20-nproc.conf <<'EOF'
# Default limit for number of user's processes to prevent
# accidental fork bombs.
# See rhbz #432903 for reasoning.
 
# nproc
root       soft    nproc     unlimited
root       hard    nproc     unlimited
*          hard    nproc     65535
*          soft    nproc     65535
# nofile
*          soft    nofile     65535
*          hard    nofile     65535
EOF
 
# Raise systemd system-level ulimits
cat >> /etc/systemd/system.conf <<'EOF'
DefaultLimitCORE=infinity
DefaultLimitNOFILE=100000
DefaultLimitNPROC=100000
EOF
 
# Raise systemd user-level ulimits
cat >>  /etc/systemd/user.conf <<'EOF'
DefaultLimitCORE=infinity
DefaultLimitNOFILE=100000
DefaultLimitNPROC=100000
EOF

# Disable the swap partition
swapoff -a

systemctl daemon-reload

Rename each server as needed:

# Takes effect immediately and persists across reboots on CentOS 7
hostnamectl set-hostname k8s-centos-node-01
# Legacy fallback read by older tooling; requires a reboot to apply
cat >> /etc/sysconfig/network <<'EOF'
HOSTNAME=k8s-centos-node-01
EOF

Update /etc/hosts on each server as needed (vim /etc/hosts):

cat >> /etc/hosts <<'EOF'
192.168.83.137 k8s-centos-node-01
192.168.83.138 k8s-centos-node-02
EOF

The cluster layout is:

Hostname             Server IP        Role     Resources
k8s-centos-node-01   192.168.83.137   Master   2 CPU, 4 GB RAM
k8s-centos-node-02   192.168.83.138   Worker   2 CPU, 4 GB RAM

Installing Docker and Kubernetes on All Nodes

Run the following script on all servers to install Docker and Kubernetes:

systemctl stop firewalld
systemctl disable firewalld
# To disable swap permanently, also comment out the swap entry in /etc/fstab
swapoff -a
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum makecache fast
sudo yum -y install docker-ce
sudo service docker start
systemctl enable docker
systemctl start docker
docker -v
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://n3jwp4vw.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
cat >> /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes Repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
yum install -y kubeadm kubelet kubectl
# Enable kubelet so it starts on boot (kubeadm init/join will configure it)
systemctl enable kubelet
systemctl start docker.service
systemctl enable docker.service
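
One prerequisite the scripts above do not set up: kubeadm's preflight checks on CentOS 7 generally expect the br_netfilter kernel module to be loaded and bridged traffic to be visible to iptables. A minimal sketch of the commonly used settings (my addition, not part of the original scripts):

cat > /etc/modules-load.d/k8s.conf <<'EOF'
br_netfilter
EOF
modprobe br_netfilter

cat > /etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
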
Deploying the K8S Master Node

First, edit /etc/sysconfig/kubelet (vim /etc/sysconfig/kubelet) and add KUBELET_EXTRA_ARGS="--fail-swap-on=false" so kubelet tolerates any remaining swap.
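
If you prefer to script that step, a minimal sketch (assuming /etc/sysconfig/kubelet exists and does not already define KUBELET_EXTRA_ARGS):

# Append the extra kubelet argument non-interactively
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' >> /etc/sysconfig/kubelet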

Initialize the Master node:

kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=cri

After initialization completes, configure kubectl access:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

export KUBECONFIG=/etc/kubernetes/admin.conf
# Restart all Docker containers
docker restart $(docker ps -a -q)

When initialization finishes, record the join command it prints (worker nodes use this command to join the cluster):

kubeadm join 192.168.83.137:6443 --token 1hgysy.4tzn3t9wm2uekcnc \
    --discovery-token-ca-cert-hash sha256:93fcf52fcb5151d55e8a9ab44c8d6061f2364c3e570e0fa986e4f4a84d159f84

Once that succeeds, you can list the nodes:

root@k8s-centos-node-01:~# kubectl get node
NAME                 STATUS     ROLES                  AGE     VERSION
k8s-centos-node-01   NotReady   control-plane,master   2m35s   v1.20.5

The node shows NotReady because no network add-on is installed yet; deploy Flannel with the following command:

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
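
To watch the add-on pods come up before re-checking the node (Flannel manifests of this vintage deploy into kube-system; newer ones use a dedicated kube-flannel namespace), something like:

kubectl get pods -A | grep flannel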

Check the nodes again; the status is now Ready:

root@k8s-centos-node-01:~# kubectl get node
NAME                 STATUS   ROLES                  AGE     VERSION
k8s-centos-node-01   Ready    control-plane,master   5m17s   v1.20.5

At this point the Master node deployment is complete.

Adding Worker Nodes to the K8S Cluster

On the worker node, import the kubeconfig from the Master node:

mkdir /root/.kube
cd /root/.kube
# Then create a file named config here and copy into it the contents of /root/.kube/config from the master node.
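
One way to do that copy in a single step (a sketch; assumes root SSH access from the worker to the master at 192.168.83.137):

mkdir -p /root/.kube
scp root@192.168.83.137:/root/.kube/config /root/.kube/config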

Join the worker node to the cluster:

kubeadm join 192.168.83.137:6443 --token ch7d4g.ck77q6p4uhtsmogn --discovery-token-ca-cert-hash sha256:93fcf52fcb5151d55e8a9ab44c8d6061f2364c3e570e0fa986e4f4a84d159f84  --ignore-preflight-errors=Swap

If you have lost the master's token, run the following command on the master to list it:

root@k8s-centos-node-01:~# kubeadm token list
TOKEN                     TTL         EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
1hgysy.4tzn3t9wm2uekcnc   23h         2021-03-21T16:24:28+08:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token

Alternatively, create a new token on the master node:

kubeadm token create

Note that tokens created with kubeadm token create (including the default one generated by kubeadm init) expire after 24 hours, which is why a kubeadm join command recorded a day earlier stops working. To avoid this, run kubeadm token create --ttl 0 to generate a token that never expires. Once the worker has joined, kubectl get nodes on the master shows the new node.
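
If all you need is a fresh join command, kubeadm can print one directly, and the --discovery-token-ca-cert-hash value can be recomputed from the cluster CA certificate (run on the master; a sketch following the standard kubeadm workflow):

# Create a new token and print a complete, ready-to-use join command
kubeadm token create --print-join-command

# Recompute the CA certificate hash used by --discovery-token-ca-cert-hash
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'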

root@k8s-centos-node-01:~# kubectl get nodes
NAME                 STATUS   ROLES                  AGE   VERSION
k8s-centos-node-01   Ready    control-plane,master   16m   v1.20.5
k8s-centos-node-02   Ready    <none>                 65s   v1.20.5

Give the worker node a ROLES label:

kubectl label node k8s-centos-node-02 node-role.kubernetes.io/worker=worker

root@k8s-centos-node-01:~# kubectl get nodes
NAME                 STATUS   ROLES                  AGE     VERSION
k8s-centos-node-01   Ready    control-plane,master   19m     v1.20.5
k8s-centos-node-02   Ready    worker                 4m25s   v1.20.5

At this point the K8S worker node deployment is complete.

Deploying the K8S Dashboard

Deploy it using the official YAML:

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml

Check the pods:

root@k8s-centos-node-01:src# kubectl -n kubernetes-dashboard get pods
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-7b59f7d4df-d5xqx   1/1     Running   0          54s
kubernetes-dashboard-74d688b6bc-6jdvn        1/1     Running   0          54s

Check the services:

root@k8s-centos-node-01:src# kubectl -n kubernetes-dashboard get svc 
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
dashboard-metrics-scraper   ClusterIP   10.107.209.252   <none>        8000/TCP   84s
kubernetes-dashboard        ClusterIP   10.99.183.21     <none>        443/TCP    85s

這里作為演示,使用nodeport方式將dashboard服務暴露在集群外,指定使用30443端口,可自定義:

kubectl  patch svc kubernetes-dashboard -n kubernetes-dashboard \
-p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30443}]}}'
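
A quick check that the service type changed and the NodePort is assigned (a simple verification, not from the original text):

kubectl -n kubernetes-dashboard get svc kubernetes-dashboard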

Access it via a node IP: https://192.168.83.138:30443/#/login

The Dashboard supports both Kubeconfig and Token authentication; here we log in with a Token.

# Create the deployment file
touch dashboard-adminuser.yaml

cat > dashboard-adminuser.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard  
EOF

# Create the login user
kubectl apply -f dashboard-adminuser.yaml

Look up the account's token:

root@k8s-centos-node-01:src# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-x8n9p
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 034a32c2-dcc2-48a3-b11a-ab9af557d312

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1066 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6InNKTVRfcUFES1M2Mzh4YW9zSVEyaXFidDJoUTJGRnN0RDVmOXRpZDV4cUUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXg4bjlwIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIwMzRhMzJjMi1kY2MyLTQ4YTMtYjExYS1hYjlhZjU1N2QzMTIiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.Hx-zaeX2VyWR0fz-XRiyBMycm0MeYw2EBkUiWdfi46SbGKr_MqB0oMP3n1uxFA1yVShXrdG4-ukH3cPytXxeDr-0Smyg2b9H3zC1Ikv_pY8ULtzwJRSpdjApKj3W_2eLCIXzW47TBIzr1IbSQu9Vz-UOLxhsOOsKnfIahRpVJRLm2GsHrpufumRZkcYG-7PdIVctrTMR0UK-VRHF-zQiwuq-R6wWlviJI3pxWnZsjQuqNn5bFMtrjYc0idKQfrS3JdhQ8Yu6h2JzY4q5qFLL-7b96JYnTiuUfDuC0c1--u9lCXyb0d2Vgz72k2eTbh79DiR3BApyuqy2OSuSJT05OQ

Enter the token above in the web UI to complete the login.
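
To extract only the token string without the surrounding describe output, a one-liner sketch (assumes admin-user is bound to a single token secret, as is the case on this Kubernetes version):

kubectl -n kubernetes-dashboard get secret \
  $(kubectl -n kubernetes-dashboard get sa admin-user -o jsonpath='{.secrets[0].name}') \
  -o jsonpath='{.data.token}' | base64 --decode; echo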
