-
基础环境
IP地址 | 主机名 | 节点 |
---|---|---|
172.20.48.57 | k8s-master1 | Master1 |
172.20.48.53 | k8s-node1 | Node1 |
172.20.48.54 | k8s-node2 | Node2 |
一、环境准备
1. 简要
2. 安装要求
3台纯净centos虚拟机,版本为7.x及以上
机器配置 2核4G以上 x3台
服务器网络互通
禁止swap分区
3. 环境准备
# 1. Disable the firewall on every node
systemctl stop firewalld
systemctl disable firewalld
# 2. Disable SELinux (permanent in config, immediate via setenforce)
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
# 3. Disable swap (kubelet refuses to start while swap is enabled)
swapoff -a                            # temporary, until reboot
sed -ri 's/.*swap.*/#&/' /etc/fstab   # permanent: comment out swap entries
# 4. Cluster host name resolution
# NOTE(fix): node1 IP was mistyped as 17.20.48.53 (missing digit)
cat >> /etc/hosts <<EOF
172.20.48.57 k8s-master1
172.20.48.53 k8s-node1
172.20.48.54 k8s-node2
EOF
# 5. Set the host name (run the matching command on each node)
hostnamectl set-hostname k8s-master1
bash
# 6. Time synchronisation
# NOTE(fix): the yum package is named "chrony"; "chronyd" is the daemon it ships
yum install chrony -y
# Point chrony at a domestic NTP server instead of the distribution defaults
sed -i '/^\(server\|pool\) /d' /etc/chrony.conf
echo 'server cn.ntp.org.cn iburst' >> /etc/chrony.conf
systemctl restart chronyd ; systemctl enable chronyd
# Enable bridged traffic through iptables and IPv4 forwarding.
# br_netfilter must be loaded first or the bridge-nf sysctl keys do not exist.
modprobe br_netfilter
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
# 7. Cron-based time sync as a fallback (optional while chrony is running)
echo '*/5 * * * * /usr/sbin/ntpdate -u ntp.api.bz' >>/var/spool/cron/root
systemctl restart crond.service
crontab -l
# Everything above can be pasted and run as-is; only the host name
# command must be adjusted on each node.
4. Docker 安装[所有节点都需要安装]
# Replace default repos with Aliyun mirrors and add the Docker CE repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -P /etc/yum.repos.d/ http://mirrors.aliyun.com/repo/epel-7.repo
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum clean all
yum install -y bash-completion.noarch
# Install the pinned Docker version
yum -y install docker-ce-18.09.9-3.el7
# To browse all available versions instead:
yum list docker-ce --showduplicates | sort -r
# Write the Docker daemon configuration.
# NOTE(fix): the original wrote to /etc/docker.json, which Docker never reads —
# the correct path is /etc/docker/daemon.json. '>' (truncate) replaces '>>' so
# re-running does not corrupt the file, the inline '#' comment is removed
# (JSON forbids comments), and the Aliyun accelerator is moved to
# "registry-mirrors" — "insecure-registries" is for plain-HTTP registries.
# Change the mirror URL to your own accelerator address.
mkdir -p /etc/docker/
cat > /etc/docker/daemon.json <<EOF
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "exec-opts": ["native.cgroupdriver=systemd"],
  "data-root": "/data/docker",
  "registry-mirrors": ["https://xg0px8kr2.mirror.aliyuncs.com"],
  "default-ulimits": {
    "nofile": {
      "Name": "nofile",
      "Hard": 64000,
      "Soft": 64000
    }
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "mtu": 1450
}
EOF
# Enable and start Docker
systemctl enable docker
systemctl start docker
systemctl status docker
5. kubernetes源配置[所有节点]
# Configure the Aliyun Kubernetes yum repository (run on every node).
# The quoted delimiter keeps the heredoc literal; the file content is unchanged.
tee /etc/yum.repos.d/kubernetes.repo > /dev/null <<'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
6. 安装kubeadm,kubelet和kubectl[所有节点]
# Install pinned 1.18.0 builds so kubeadm/kubelet/kubectl stay in lockstep
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
# Enable kubelet at boot only — kubeadm init/join will start it with its config
systemctl enable kubelet
7. 部署Kubernetes Master [ master 172.20.48.57 ]
# Initialise the control plane on the master (172.20.48.57).
# --image-repository pulls control-plane images from the Aliyun mirror
#   (avoids blocked access to k8s.gcr.io);
# --service-cidr and --pod-network-cidr must not overlap each other or the
#   host network; the pod CIDR must match "Network" in kube-flannel.yml later.
kubeadm init \
--apiserver-advertise-address=172.20.48.57 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.18.0 \
--service-cidr=10.10.0.0/16 \
--pod-network-cidr=20.20.0.0/16
# After success, set up kubectl credentials for the current user [master]:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
初始化后获取到的token
kubeadm join 172.20.48.57:6443 --token h8qee8.ry1geq7ujkliir0x \ --discovery-token-ca-cert-hash sha256:015373961494015e0d6ab9cdcff347c9bae30506b9fcac8874ce0a59191363f8
记住token,后面使用
8. 添加k8s-node节点
# Run on node1 and node2 to join them to the cluster
# (token and hash come from the 'kubeadm init' output on the master)
kubeadm join 172.20.48.57:6443 --token h8qee8.ry1geq7ujkliir0x \
--discovery-token-ca-cert-hash sha256:015373961494015e0d6ab9cdcff347c9bae30506b9fcac8874ce0a59191363f8
9.kubectl命令工具配置[master]
[root@k8s-master1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master1 NotReady master 2m55s v1.18.0
k8s-node1 NotReady <none> 17s v1.18.0
k8s-node2 NotReady <none> 11s v1.18.0
10. 安装flannel插件
官网: https://github.com/coreos/flannel/ wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
wget kube-flannel.yml的时候显示连接失败 是因为网站被墙了,建议在/etc/hosts文件添加一条 199.232.68.133 raw.githubusercontent.com 就可以正常下载了。 下载完成后创建并查看
kubectl create -f kube-flannel.yml [root@k8s-master1 ~]# kubectl get po -n kube-system NAME READY STATUS RESTARTS AGE coredns-7ff77c879f-ts2g2 0/1 Running 0 60m coredns-7ff77c879f-zmgp7 0/1 Running 0 60m etcd-k8s-master1 1/1 Running 0 60m kube-apiserver-k8s-master1 1/1 Running 0 60m kube-controller-manager-k8s-master1 1/1 Running 0 60m kube-flannel-ds-48mhv 1/1 Running 0 103s kube-flannel-ds-rphrr 1/1 Running 0 103s kube-flannel-ds-strdj 1/1 Running 0 103s kube-proxy-5mqpv 1/1 Running 0 58m kube-proxy-c4t86 1/1 Running 0 58m kube-proxy-gj9lv 1/1 Running 0 60m kube-scheduler-k8s-master1 1/1 Running 0 60m # vim kube-flannel.yml net-conf.json: | { "Network": "20.20.0.0/16", # 修改为初始化时设置pod的网段 "Backend": { "Type": "vxlan" }
11. 安装dashboard界面(Kuboard)
wget https://kuboard.cn/install-script/kuboard.yaml kubectl apply -f kuboard.yaml kubectl get po -n kube-system 任意节点访问: http://172.20.48.57:32567/login # 获取dashboard token, 也就是创建service account并绑定默认cluster-admin管理员集群角色 kubectl create serviceaccount dashboard-admin -n kube-system kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin [root@k8s-master1 ~]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}') Name: dashboard-admin-token-bmw5k Namespace: kube-system Labels: <none> Annotations: kubernetes.io/service-account.name: dashboard-admin kubernetes.io/service-account.uid: 6f843fe6-10fc-41f3-a136-26690a68d33b Type: kubernetes.io/service-account-token Data ==== ca.crt: 1025 bytes namespace: 11 bytes token: eyJhbGciOiJSUzI1NiIsImtpZCI6Ik5USkIxdElfTnVDUUpLclhwNW1TUW13VmtXWTRmQmJxcFVRQ0R3RHp1ZGcifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tYm13NWsiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNmY4NDNmZTYtMTBmYy00MWYzLWExMzYtMjY2OTBhNjhkMzNiIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.RHghwCqWONwRrGdmMKzaXrI30QPhXBYisj7i4T-0P9heDSdVkINJENIhzM1NK2SWG6CanOEqgMYQK4B0uqqAA4js-uws6C81M0RJUlU5Esoqum0ANLnemEA3sNh6zJw8WTwgKg52A05t1N1awIrA9fbDE4X8uX4qVZLAywqWCTR39EuINh8Ma4Qemvbzvv9-yS8JNQPn2gw83vpoNcU1og3ltkG5toPyqnttOty-wcfNNicQBaSUDQmVTai4yeLQD9xc8wty8O3iIsXuC37RkO2P3V2P8pUizqhkK3jitbrpS79R8IThsqEhni5Md7zQrGwK6vP2_qe5TCqLW1V8vg
12 删除node节点
[root@k8s-master1 ~]# kubectl get node NAME STATUS ROLES AGE VERSION k8s-master1 Ready master 47h v1.18.0 k8s-node1 Ready <none> 47h v1.18.0 k8s-node2 Ready <none> 47h v1.18.0 [root@k8s-master1 ~]# kubectl delete node k8s-node2node "k8s-node2" deleted [root@k8s-master1 ~]# kubectl get nodeNAME STATUS ROLES AGE VERSION k8s-master1 Ready master 47h v1.18.0k8s-node1 Ready <none> 47h v1.18.0
13. 创建Token添加新node节点
默认token会保存24消失,过期后就不可用,如果需要重新建立token,可在master节点使用以下命令重新生成: [root@k8s-master1 ~]# kubeadm token create ## kubeadm token create --print-join-command W0207 12:34:59.833174 21114 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io] ari0w3.5516mkdspyse8mwo [root@k8s-master1 ~]# kubeadm token list TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS ari0w3.5516mkdspyse8mwo 23h 2021-02-08T12:34:59+08:00 authentication,signing <none> system:bootstrappers:kubeadm:default-node-token h8qee8.ry1geq7ujkliir0x 21h 2021-02-08T10:29:45+08:00 authentication,signing The default bootstrap token generated by 'kubeadm init'. system:bootstrappers:kubeadm:default-node-token ogf4ei.uulghpvnq0sldbh1 22h 2021-02-08T11:09:46+08:00 authentication,signing <none> system:bootstrappers:kubeadm:default-node-token [root@k8s-master1 ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //' 015373961494015e0d6ab9cdcff347c9bae30506b9fcac8874ce0a59191363f8 新token加入集群方法: kubeadm join 10.0.0.63:6443 --discovery-token ari0w3.5516mkdspyse8mwo --discovery-token-ca-cert-hash 3d847b858ed649244b4110d4d60ffd57f43856f42ca9c22e12ca33946673ccb4
14 添加master节点
kubeadm join 172.20.48.57:6443 --token 1xe1pj.9dtlyfcsuroyd13j --discovery-token-ca-cert-hash sha256:015373961494015e0d6ab9cdcff347c9bae30506b9fcac8874ce0a59191363f8 --control-plane --certificate-key 58bcb304f1aeddc98c315e8cf30582779d4c2519e4d00513b6040783758b55db 9b65bea41bd75fbe3cda68fd757bd2741ab2676cc261c914bd79a3097a2219b3 55d7e0dc31eac63e78366dea26d4f100bf35213f5464203d3fd24c3d64b3ccc1