If a task has to be done more than once and each run takes at least ten minutes, it is worth building an automated workflow for it.
I previously wrote two articles on setting up a Kubernetes cluster with kubeadm, one using Docker and one using containerd, so it makes sense to wrap the whole process in an automated deployment script.
Steps:
1. vi /usr/bin/kubestart
Copy the following content into the file:
#! /bin/bash
###############################################
## Simple Install Your K8S
###############################################
kube_pod_subnet="10.244.0.0/16"
kube_version="1.21.3"
kube_image_server="registry.cn-hangzhou.aliyuncs.com/google_containers"
crictl_url="https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.21.0/"
crictl_name="crictl-v1.21.0-linux-amd64.tar.gz"
function init-env-disable-selinux()
{
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
}
function init-env-disable-firewalld()
{
sudo systemctl stop firewalld
sudo systemctl disable --now firewalld
}
function init-env-repository()
{
echo -e "[kubernetes] \nname=Kubernetes \nbaseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ \nenabled=1 \ngpgcheck=1 \nrepo_gpgcheck=1 \ngpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg" | sudo tee /etc/yum.repos.d/kubernetes.repo
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install -y centos-release-openstack-rocky  # provides the openvswitch packages installed later
}
function init-install-kube()
{
sudo yum install -y kubeadm-${kube_version} kubectl-${kube_version} kubelet-${kube_version} --disableexcludes=kubernetes
}
function init-docker()
{
yum install -y wget docker-ce openvswitch* certbot
systemctl start docker
systemctl enable docker
systemctl enable kubelet
}
function init-containerd()
{
sudo yum install -y wget containerd.io openvswitch* certbot
mkdir -p /etc/containerd
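# fetch the crictl CLI so the containerd CRI socket can be driven from the command line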
cd /home
wget $crictl_url$crictl_name
tar zxf $crictl_name
mv crictl /usr/local/bin/
rm -rf $crictl_name
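# kernel modules needed for containerd's overlayfs snapshotter and for bridged pod traffic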
echo "overlay" > /etc/modules-load.d/containerd.conf
echo "br_netfilter" >> /etc/modules-load.d/containerd.conf
sudo modprobe overlay
sudo modprobe br_netfilter
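# make bridged traffic visible to iptables and enable IP forwarding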
echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/99-kubernetes-cri.conf
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.d/99-kubernetes-cri.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.d/99-kubernetes-cri.conf
sudo sysctl --system
sudo containerd config default > /etc/containerd/config.toml
# swap the sandbox (pause) image for the Aliyun mirror, whichever 3.x tag the default config ships with
sed -i 's#k8s.gcr.io/pause:3\.[0-9]*#registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2#g' /etc/containerd/config.toml
sudo systemctl restart containerd
sudo systemctl enable containerd
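# run kubelet with the systemd cgroup driver and point crictl at containerd's socket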
echo "KUBELET_EXTRA_ARGS=--cgroup-driver=systemd" > /etc/sysconfig/kubelet
echo "runtime-endpoint: unix:///run/containerd/containerd.sock" > /etc/crictl.yaml
echo "image-endpoint: unix:///run/containerd/containerd.sock" >> /etc/crictl.yaml
echo "timeout: 10" >> /etc/crictl.yaml
echo "debug: false" >> /etc/crictl.yaml
sudo systemctl restart kubelet
sudo systemctl enable kubelet
}
function init-env-kubeconfig()
{
mkdir -p /etc/kubernetes
# render the kubeadm config that run-kube feeds to kubeadm init
cat > /etc/kubernetes/kubeadm.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
bootstrapTokens:
 - ttl: "0"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
networking:
 podSubnet: "${kube_pod_subnet}"
kubernetesVersion: "v${kube_version}"
imageRepository: "${kube_image_server}"
EOF
}
function init-env-kubecomp()
{
# default calico cni
echo -e "https://docs.projectcalico.org/archive/v3.17/manifests/calico.yaml" > /etc/kubernetes/kubeenv.list
# flannel: cni config url
# echo -e "https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml" > /etc/kubernetes/kubeenv.list
}
function init-env()
{
init-env-disable-selinux
init-env-disable-firewalld
init-env-repository
init-install-kube
if [[ -z $2 ]]
then
init-containerd
elif [[ $2 == "docker" ]]
then
init-docker
elif [[ $2 == "containerd" ]]
then
init-containerd
else
echo "only docker and containerd are supported"
exit 1
fi
init-env-kubeconfig
init-env-kubecomp
}
function run-kube()
{
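# kubelet refuses to start with swap enabled: turn it off now and comment it out of /etc/fstab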
sudo swapoff -a && sysctl -w vm.swappiness=0
sudo sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
systemctl restart kubelet
echo "1" > /proc/sys/net/bridge/bridge-nf-call-iptables
echo "1" > /proc/sys/net/ipv4/ip_forward
kubeadm init --config /etc/kubernetes/kubeadm.yaml
rm -rf $HOME/.kube
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
iptables -P FORWARD ACCEPT
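# apply every manifest URL listed in kubeenv.list (the CNI manifests by default)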
while read line
do
kubectl apply -f $line
done < /etc/kubernetes/kubeenv.list
}
function help()
{
echo -e "Commands:"
echo -e " init-env [docker|containerd] :\t(Init): Initialize the node environment (disable selinux and firewalld, add repos, install kubeadm/kubelet/kubectl and the container runtime; default is containerd)"
echo -e " run-kube :\t(Init): Deploy Kubernetes and apply the CNI manifests listed in /etc/kubernetes/kubeenv.list (calico by default; flannel is available)"
}
case $1 in
"init-env")
init-env $*
;;
"run-kube")
run-kube $*
;;
"--help")
help
;;
*)
help
;;
esac
2. sudo chmod 755 /usr/bin/kubestart
3. Then you can create your k8s cluster with the two commands below (a help command is also available):
# initialize the node environment (repos, kubeadm, container runtime, kubeadm.yaml)
kubestart init-env
# bring up the cluster and apply the CNI manifests
kubestart run-kube
# show the help text
kubestart help
Notes on the script:
The variables at the top of the script set the Kubernetes version and the crictl package to download; to use a different release, edit them there before running the script. To see which versions your yum repository provides, run:
yum list --showduplicates kubeadm --disableexcludes=kubernetes
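For example, to pin a different release you would change the variables at the top of /usr/bin/kubestart; the version numbers below are only illustrative, pick ones your repo and the cri-tools release page actually provide:
kube_version="1.21.2"
crictl_url="https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.21.0/"
crictl_name="crictl-v1.21.0-linux-amd64.tar.gz"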
The kubestart init-env command also accepts an extra argument to choose the container runtime, either docker or containerd; the default is containerd.
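For example:
kubestart init-env containerd   # explicit, same as the default
kubestart init-env docker       # use Docker as the container runtime instead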
kubeadm init runs against the /etc/kubernetes/kubeadm.yaml config file; if you have special requirements, edit that file after init-env has finished. In fact, bringing up the cluster boils down to this one command:
kubeadm init --config /etc/kubernetes/kubeadm.yaml
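If, for instance, the API server should sit behind a fixed address, a controlPlaneEndpoint line can be added to the ClusterConfiguration section of that file before running run-kube; the address below is just a placeholder, not something the script sets:
controlPlaneEndpoint: "192.168.1.100:6443"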
Calico is applied as the CNI by default; if you want a different one, just modify the init-env-kubecomp function in the script.
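For example, to switch to flannel, make the flannel line the active one in init-env-kubecomp, or simply overwrite the list after init-env has run:
echo -e "https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml" > /etc/kubernetes/kubeenv.list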