本文搭建的環境不建議在生產環境使用,因為 k8s 所有數據都保存在 etcd 中, 生產環境你最起碼得有個 etcd 集群吧..., 開發自己本地測試學習應該是沒問題的
本文環境
docker: 20.10.8
k8s: 1.21.0
kubeadm: 1.21.4
kubelet: 1.21.4
kubectl: 1.21.4
系統版本: CentOS Linux release 8.4.2105
網絡插件: flannel
兩台虛擬機:
192.168.5.128 k8s-master
192.168.5.129 k8s-node-1
配置均為 2核2G/20GB
前置工作
前置工作需要在所有的節點上執行
配置要求
CPU 推薦兩核或者更多
內存 不得小於 2G
MAC地址 保證唯一
交換分區 禁用
節點之間保持網絡通暢
修改主機名
各個節點修改成自己的名字
hostnamectl set-hostname <name>
修改 hosts
配置各個節點的
ip
和主機名
映射
# vim /etc/hosts
192.168.5.128 k8s-master
192.168.5.129 k8s-node-1
關閉防火牆, 開啟內核網絡參數
systemctl stop firewalld
systemctl disable firewalld
# vi /etc/sysctl.conf # 編輯配置文件
# 追加下面兩行
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
modprobe br_netfilter # 先加載 br_netfilter 內核模塊, 否則 sysctl -p 可能報 "No such file or directory"
sysctl -p # 應用配置
關閉 SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
關閉 swap
注釋掉 /etc/fstab 文件中包含 swap 的那一行, 如下方文件內容示例所示
# vim /etc/fstab
# Created by anaconda on Wed Jan 6 20:22:34 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root / ext4 defaults 1 1
UUID=b6a81016-1920-44c6-b713-2547ccbc9adf /boot ext4 defaults 1 2
/dev/mapper/centos-home /home ext4 defaults 1 2
# /dev/mapper/centos-swap swap swap defaults 0 0
重啟
reboot
安裝 Docker
所有的節點都必須安裝
docker
且設置服務為開機自動啟動
# 移除機器上已經安裝的 docker
yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
# 安裝依賴
yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
# 添加鏡像源
yum-config-manager \
--add-repo \
https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo
# 安裝
yum install docker-ce docker-ce-cli containerd.io -y
# 安裝指定版本 docker, 安裝其他軟件也是一樣
yum list docker-ce --showduplicates | sort -r
# Last metadata expiration check: 0:32:36 ago on Mon 16 Aug 2021 02:15:13 PM CST.
# Installed Packages
# docker-ce.x86_64 3:20.10.8-3.el8 docker-ce-stable
# docker-ce.x86_64 3:20.10.8-3.el8 @docker-ce-stable
# docker-ce.x86_64 3:20.10.7-3.el8 docker-ce-stable
# docker-ce.x86_64 3:20.10.6-3.el8 docker-ce-stable
# docker-ce.x86_64 3:20.10.5-3.el8 docker-ce-stable
# docker-ce.x86_64 3:20.10.4-3.el8 docker-ce-stable
# docker-ce.x86_64 3:20.10.3-3.el8 docker-ce-stable
# docker-ce.x86_64 3:20.10.2-3.el8 docker-ce-stable
# docker-ce.x86_64 3:20.10.1-3.el8 docker-ce-stable
# docker-ce.x86_64 3:20.10.0-3.el8 docker-ce-stable
# docker-ce.x86_64 3:19.03.15-3.el8 docker-ce-stable
# docker-ce.x86_64 3:19.03.14-3.el8 docker-ce-stable
# docker-ce.x86_64 3:19.03.13-3.el8 docker-ce-stable
# Available Packages
# 選擇上面 列出的版本進行安裝,比如這里安裝最新版的 20.10.8
yum install docker-ce-20.10.8-3.el8
# 啟動服務,並設置為開機自啟
systemctl start docker
systemctl enable docker
# 更換 docker 的鏡像源
# vim /etc/docker/daemon.json
{
"registry-mirrors" : [
"https://registry.docker-cn.com",
"https://docker.mirrors.ustc.edu.cn",
"http://hub-mirror.c.163.com",
"https://cr.console.aliyun.com/"
]
}
# 如果當前用戶非 root 用戶,需要加入 docker 的用戶組
# 加入 docker 組后,需要重新登錄(或執行 newgrp docker)使組生效,之后才能不加 sudo 使用 docker 命令
sudo usermod -aG docker <your username>
# 重啟docker
sudo systemctl restart docker
安裝 kubeadm,kubelet,kubectl
這三個組件有版本兼容性要求
具體版本要求參考官網
三個工具所有節點都需要安裝
添加鏡像源
# vim /etc/yum.repos.d/kubernetes.repo
# 內容, 注意 gpgkey 是一行, 兩個 https 地址中間使用空格分隔
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
# 清理緩存,重建
yum clean all && yum makecache
開始安裝
當前時間: 2021年8月16日
yum install -y kubelet-1.21.4 kubeadm-1.21.4 kubectl-1.21.4
准備初始化集群<Master節點>
查看默認的初始化配置文件, 並導出成文件
kubeadm config print init-defaults > init-defaults.yaml
按照下方示例提示文字,進行修改
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456780abcdef # token 設置
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.5.128 # master 對外訪問ip
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: k8s-master # master節點名稱, 此名稱須加入 hosts 文件解析
taints: null
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/k8sxio # 修改鏡像源地址
kind: ClusterConfiguration
kubernetesVersion: 1.21.0 # 待安裝的 k8s 版本
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12 # service 網段保持默認即可
podSubnet: 10.244.0.0/16 # flannel 默認使用的 pod 網段, 注意不要和 service 網段混用
scheduler: {}
查看並下載鏡像
可以事先下載然后導入到自己本地的
docker
中
# 查看需要下載那些鏡像
kubeadm config images list --config init-defaults.yaml
# registry.aliyuncs.com/k8sxio/kube-apiserver:v1.22.0
# registry.aliyuncs.com/k8sxio/kube-controller-manager:v1.22.0
# registry.aliyuncs.com/k8sxio/kube-scheduler:v1.22.0
# registry.aliyuncs.com/k8sxio/kube-proxy:v1.22.0
# registry.aliyuncs.com/k8sxio/pause:3.5
# registry.aliyuncs.com/k8sxio/etcd:3.5.0-0
# registry.aliyuncs.com/k8sxio/coredns:v1.8.4
# 開始下載鏡像,防止直接安裝因為某個鏡像下載失敗,導致整體安裝失敗
kubeadm config images pull --config init-defaults.yaml
# 如果發生下面這種拉取鏡像錯誤,嘗試使用docker直接搜索鏡像,然后使用 docker tag 重新打標即可
# [root@k8s-master k8s-install-file]# kubeadm config images pull --config init-defaults.yaml
# [config/images] Pulled registry.aliyuncs.com/k8sxio/kube-apiserver:v1.21.0
# [config/images] Pulled registry.aliyuncs.com/k8sxio/kube-controller-manager:v1.21.0
# [config/images] Pulled registry.aliyuncs.com/k8sxio/kube-scheduler:v1.21.0
# [config/images] Pulled registry.aliyuncs.com/k8sxio/kube-proxy:v1.21.0
# [config/images] Pulled registry.aliyuncs.com/k8sxio/pause:3.4.1
# [config/images] Pulled registry.aliyuncs.com/k8sxio/etcd:3.4.13-0
# failed to pull image "registry.aliyuncs.com/k8sxio/coredns:v1.8.0": output: Error response from daemon: manifest for registry.aliyuncs.com/k8sxio/coredns:v1.8.0 not found: manifest unknown: manifest unknown
# , error: exit status 1
# To see the stack trace of this error execute with --v=5 or higher
# 這里搜索鏡像
# [root@k8s-master k8s-install-file]# docker search coredns:v1.8.0
# NAME DESCRIPTION STARS OFFICIAL AUTOMATED
# louwy001/coredns-coredns k8s.gcr.io/coredns/coredns:v1.8.0 1
# ninokop/coredns k8s.gcr.io/coredns/coredns:v1.8.0 0
# xwjh/coredns from k8s.gcr.io/coredns/coredns:v1.8.0 0
# hhhlhh/coredns-coredns FROM k8s.gcr.io/coredns/coredns:v1.8.0 0
# suxishuo/coredns k8s.gcr.io/coredns/coredns:v1.8.0 0
# fengbb/coredns k8s.gcr.io/coredns/coredns:v1.8.0 0
# 拉取鏡像
# [root@k8s-master k8s-install-file]# docker pull louwy001/coredns-coredns:v1.8.0
# v1.8.0: Pulling from louwy001/coredns-coredns
# c6568d217a00: Pull complete
# 5984b6d55edf: Pull complete
# Digest: sha256:10ecc12177735e5a6fd6fa0127202776128d860ed7ab0341780ddaeb1f6dfe61
# Status: Downloaded newer image for louwy001/coredns-coredns:v1.8.0
# docker.io/louwy001/coredns-coredns:v1.8.0
# 重新打標,並取消多余的tag名稱
# [root@k8s-master k8s-install-file]# docker tag louwy001/coredns-coredns:v1.8.0 registry.aliyuncs.com/k8sxio/coredns:v1.8.0
# [root@k8s-master k8s-install-file]#
# [root@k8s-master k8s-install-file]# docker rmi louwy001/coredns-coredns:v1.8.0
# Untagged: louwy001/coredns-coredns:v1.8.0
# Untagged: louwy001/coredns-coredns@sha256:10ecc12177735e5a6fd6fa0127202776128d860ed7ab0341780ddaeb1f6dfe61
# [root@k8s-master k8s-install-file]#
卸載集群
如果 初始化集群失敗了,或者參數錯誤,直接執行下面的命令還原設置
kubeadm reset
iptables -F
iptables -X
ipvsadm -C
rm -rf /etc/cni/net.d
rm -rf $HOME/.kube/config
開始初始化
kubeadm init --config init-defaults.yaml
初始化完成后, 根據提示執行初始設置, 並記錄下 加入集群的命令和參數
# 集群配置文件
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# 開機自啟 kubelet
systemctl enable kubelet.service
# 加入集群
kubeadm join 192.168.5.128:6443 --token abcdef.0123456780abcdef \
--discovery-token-ca-cert-hash sha256:d27cf2fd4a45c3ce8c59cdf0163edbf7cd4bc55a994a34404c0e175a47770798
其他節點接入集群
確認安裝好 kubeadm , kubelet, kubectl
在節點機器上執行上面提示的 加入集群命令, 並設置kubelet
為開機自啟
如果沒復制保存上面提示的加入集群命令,可以在master
節點上執行下面的命令來查看加入命令
kubeadm token create --print-join-command
在 master
節點上拷貝集群配置文件給node
, 這樣 node
才能正常使用kubectl
命令,也可以不操作這一步
systemctl enable kubelet.service
scp /etc/kubernetes/admin.conf k8s-node-1:~/.kube/config
配置 Flannel 網絡
安裝 flannel 保證各個節點的pod之間網絡通信
修改集群 kube-controller-manager.yaml
文件,追加網絡參數
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
# 在 command 列表下面追加兩行, 注意保留 YAML 列表項的 "- " 前綴和縮進
    - --allocate-node-cidrs=true
    - --cluster-cidr=10.244.0.0/16
# 重啟 kubelet
systemctl restart kubelet
如果是多網卡的機器,可能需要指定下網卡, 參考這位大佬文章中 "安裝 Pod Network" 一節提到的:
"另外需要注意的是如果你的節點有多個網卡的話,需要在 kube-flannel.yml 中使用
--iface
參數指定集群主機內網網卡的名稱,否則可能會出現 dns 無法解析。"
我猜應該是在flannel部署文件
中,下面的位置加, 注意Kind
和metadata
中的信息
注意: flannel部署文件
在下面給出下載信息
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
....
...
..
.
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.14.0
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
- --iface=ens33 # 這里追加參數 <----------------
resources:
requests:
...
....
......
獲取flannel
部署文件,並下載鏡像
curl https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml > kube-flannel.yml
# 查看需要的鏡像
cat kube-flannel.yml | grep image
# image: quay.io/coreos/flannel:v0.14.0
# image: quay.io/coreos/flannel:v0.14.0
# 直接下載如果失敗的話,就用docker搜索下別人上傳的鏡像
docker search flannel:v0.14.0
# NAME DESCRIPTION STARS OFFICIAL AUTOMATED
# xwjh/flannel from quay.io/coreos/flannel:v0.14.0 1
# 下載鏡像並重新進行打tag, 完事后刪除多余的 tag
docker pull xwjh/flannel:v0.14.0
docker tag xwjh/flannel:v0.14.0 quay.io/coreos/flannel:v0.14.0
docker rmi xwjh/flannel:v0.14.0
# 應用配置
kubectl create -f kube-flannel.yml
# [root@k8s-master k8s-install-file]# kubectl create -f kube-flannel.yml
# Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
# podsecuritypolicy.policy/psp.flannel.unprivileged created
# clusterrole.rbac.authorization.k8s.io/flannel created
# clusterrolebinding.rbac.authorization.k8s.io/flannel created
# serviceaccount/flannel created
# configmap/kube-flannel-cfg created
# daemonset.apps/kube-flannel-ds created
# [root@k8s-master k8s-install-file]#
驗證&其他設置
至此k8s簡單搭建版到此結束, 后續多個節點,多
master
之類的查資料設置加入集群即可
驗證節點狀態
執行kubectl get node
查看集群節點狀態, 如果你之前沒裝 flannel
直接執行會看到如下信息
[root@k8s-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master NotReady control-plane,master 21h v1.21.4
k8s-node-1 NotReady <none> 21h v1.21.4
[root@k8s-master ~]#
當你flannel
正確安裝后,會變成如下樣式, 兩個節點都會變成 Ready
狀態
[root@k8s-master k8s-install-file]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane,master 22h v1.21.4
k8s-node-1 Ready <none> 21h v1.21.4
[root@k8s-master k8s-install-file]#
驗證 coredns 狀態
安裝完成后查看pod
狀態可能會出現coredns
錯誤,無法啟動:
[root@k8s-master k8s-install-file]# kubectl get pod --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-67574f65b-fh2kq 0/1 ImagePullBackOff 0 22h
kube-system coredns-67574f65b-qspjm 0/1 ImagePullBackOff 0 22h
kube-system etcd-k8s-master 1/1 Running 1 22h
kube-system kube-apiserver-k8s-master 1/1 Running 1 22h
kube-system kube-controller-manager-k8s-master 1/1 Running 1 5h44m
kube-system kube-flannel-ds-h5fd6 1/1 Running 0 7m33s
kube-system kube-flannel-ds-z945p 1/1 Running 0 7m33s
kube-system kube-proxy-rmwcx 1/1 Running 1 21h
kube-system kube-proxy-vzmjw 1/1 Running 1 22h
kube-system kube-scheduler-k8s-master 1/1 Running 1 22h
[root@k8s-master k8s-install-file]#
我們查看下pod
的錯誤信息
[root@k8s-master k8s-install-file]# kubectl -n kube-system describe pod coredns-67574f65b-fh2kq
Name: coredns-67574f65b-fh2kq
Namespace: kube-system
Priority: 2000000000
Priority Class Name: system-cluster-critical
Node: k8s-node-1/192.168.5.129
Start Time: Tue, 17 Aug 2021 14:54:36 +0800
Labels: k8s-app=kube-dns
pod-template-hash=67574f65b
Annotations: <none>
Status: Pending
IP: 10.244.1.3
IPs:
IP: 10.244.1.3
Controlled By: ReplicaSet/coredns-67574f65b
Containers:
coredns:
Container ID:
Image: registry.aliyuncs.com/k8sxio/coredns:v1.8.0
Image ID:
Ports: 53/UDP, 53/TCP, 9153/TCP
Host Ports: 0/UDP, 0/TCP, 0/TCP
Args:
-conf
/etc/coredns/Corefile
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Limits:
memory: 170Mi
Requests:
cpu: 100m
memory: 70Mi
Liveness: http-get http://:8080/health delay=60s timeout=5s period=10s #success=1 #failure=5
Readiness: http-get http://:8181/ready delay=0s timeout=1s period=10s #success=1 #failure=3
Environment: <none>
Mounts:
/etc/coredns from config-volume (ro)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-trjcg (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
config-volume:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: coredns
Optional: false
kube-api-access-trjcg:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: Burstable
Node-Selectors: kubernetes.io/os=linux
Tolerations: CriticalAddonsOnly op=Exists
node-role.kubernetes.io/control-plane:NoSchedule
node-role.kubernetes.io/master:NoSchedule
node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 4h53m (x1020 over 21h) default-scheduler 0/2 nodes are available: 2 node(s) had taint {node.kubernetes.io/not-ready:}, that the pod didn't tolerate.
Warning FailedScheduling 8m6s (x9 over 14m) default-scheduler 0/2 nodes are available: 2 node(s) had taint {node.kubernetes.io/not-ready:}, that the pod didn't tolerate.
Normal Scheduled 7m56s default-scheduler Successfully assigned kube-system/coredns-67574f65b-fh2kq to k8s-node-1
Normal Pulling 6m27s (x4 over 7m54s) kubelet Pulling image "registry.aliyuncs.com/k8sxio/coredns:v1.8.0"
Warning Failed 6m26s (x4 over 7m53s) kubelet Failed to pull image "registry.aliyuncs.com/k8sxio/coredns:v1.8.0": rpc error: code = Unknown desc = Error response from daemon: manifest for registry.aliyuncs.com/k8sxio/coredns:v1.8.0 not found: manifest unknown: manifestunknown
Warning Failed 6m26s (x4 over 7m53s) kubelet Error: ErrImagePull
Warning Failed 6m15s (x6 over 7m53s) kubelet Error: ImagePullBackOff
Normal BackOff 2m45s (x21 over 7m53s) kubelet Back-off pulling image "registry.aliyuncs.com/k8sxio/coredns:v1.8.0"
發現錯誤是拉取鏡像失敗, 但是master
節點確實存在這個鏡像, 那這個指的就是 node
節點上缺少鏡像,我們導出master
上的registry.aliyuncs.com/k8sxio/coredns:v1.8.0
拷貝給node
節點導入即可
docker save -o coredns.zip registry.aliyuncs.com/k8sxio/coredns:v1.8.0
scp coredns.zip k8s-node-1:~
# node 節點
docker load -i coredns.zip
重新查看狀態
[root@k8s-master k8s-install-file]# kubectl -n kube-system get pods
NAME READY STATUS RESTARTS AGE
coredns-67574f65b-fh2kq 1/1 Running 0 22h
coredns-67574f65b-qspjm 1/1 Running 0 22h
etcd-k8s-master 1/1 Running 1 22h
kube-apiserver-k8s-master 1/1 Running 1 22h
kube-controller-manager-k8s-master 1/1 Running 1 5h58m
kube-flannel-ds-h5fd6 1/1 Running 0 21m
kube-flannel-ds-z945p 1/1 Running 0 21m
kube-proxy-rmwcx 1/1 Running 1 22h
kube-proxy-vzmjw 1/1 Running 1 22h
kube-scheduler-k8s-master 1/1 Running 1 22h
[root@k8s-master k8s-install-file]#
node 節點角色為 none
查看節點詳細信息, 可以看到node
節點為none
角色, 我們手動指定節點為node
[root@k8s-master k8s-install-file]# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master Ready control-plane,master 22h v1.21.4 192.168.5.128 <none> CentOS Linux 8 4.18.0-305.12.1.el8_4.x86_64 docker://20.10.8
k8s-node-1 Ready <none> 22h v1.21.4 192.168.5.129 <none> CentOS Linux 8 4.18.0-305.12.1.el8_4.x86_64 docker://20.10.8
[root@k8s-master k8s-install-file]
執行下面的命令修改節點角色
kubectl label node <node name> node-role.kubernetes.io/node=
[root@k8s-master k8s-install-file]# kubectl label node k8s-node-1 node-role.kubernetes.io/node=
node/k8s-node-1 labeled
[root@k8s-master k8s-install-file]#
[root@k8s-master k8s-install-file]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane,master 22h v1.21.4
k8s-node-1 Ready node 22h v1.21.4
[root@k8s-master k8s-install-file]#
設置節點角色
# 設置節點為 master
kubectl label node <node name> node-role.kubernetes.io/master=
# 設置 test2 為 node 角色
kubectl label node <node name> node-role.kubernetes.io/node=
# 設置 master 一般情況下不接受負載
kubectl taint node <node name> node-role.kubernetes.io/master=true:NoSchedule
# 設置 master 不運行pod
kubectl taint node <node name> node-role.kubernetes.io/master=:NoSchedule
# 刪除節點標簽<角色>, 只要修改 = 號為 - 號即可
kubectl label node k8s-node-1 node-role.kubernetes.io/node-
所有節點都允許運行pod
kubectl taint nodes --all node-role.kubernetes.io/master-
# 多次運行后是這個提示, 正常可以參考官網提示
# [root@k8s-master k8s-install-file]# kubectl taint nodes --all node-role.kubernetes.io/master-
# taint "node-role.kubernetes.io/master" not found
# taint "node-role.kubernetes.io/master" not found
# [root@k8s-master k8s-install-file]#
修改 NodePort 端口范圍
默認端口號范圍是 30000-32767
修改后等一會兒就可以生效
# vim /etc/kubernetes/manifests/kube-apiserver.yaml
# 在 command 末尾追加下面的參數,設置范圍為 0-65535 全端口
- --service-node-port-range=0-65535