k8s手動安裝


一、主節點安裝

設置主機名(以下兩條命令分別在對應的節點上執行)
hostnamectl set-hostname master
hostnamectl set-hostname node01

修改hosts文件
vim /etc/hosts
192.168.1.91 master
192.168.1.92 node01

關閉防火牆
systemctl stop firewalld
systemctl disable firewalld


禁用SELINUX
setenforce 0
sed -i -re '/^\s*SELINUX=/s/^/#/' -e '$i\\SELINUX=disabled' /etc/selinux/config

關閉swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab

設置網橋netfilter內核參數(使流經網橋的流量經過iptables/ip6tables規則處理,k8s網絡必需;若sysctl報錯找不到該參數,先執行 modprobe br_netfilter)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system

安裝docker
安裝依賴
yum install yum-utils device-mapper-persistent-data lvm2 -y
下載repo文件
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
安裝docker
yum update && yum install docker-ce-18.06.2.ce

設置docker
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF

systemctl daemon-reload
systemctl restart docker

 

安裝kubeadm, kubelet and kubectl(所有節點都要安裝)
修改k8s源
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.16.3 kubectl-1.16.3 kubeadm-1.16.3 --disableexcludes=kubernetes
systemctl enable --now kubelet

注:使用yum安裝時,如提示某個 xxx.rpm 的公鑰尚未安裝,可在命令後追加 --nogpgcheck 參數跳過公鑰檢查,如下:
yum install -y kubelet-1.16.3 kubectl-1.16.3 kubeadm-1.16.3 --disableexcludes=kubernetes --nogpgcheck

設置本地倉庫拉取鏡像
vi /usr/lib/systemd/system/docker.service
添加 --insecure-registry 172.16.40.78:1180
ExecStart=/usr/bin/dockerd --insecure-registry 172.16.40.78:1180
systemctl daemon-reload
systemctl restart docker
登錄docker倉庫
docker login 172.16.40.78:1180
輸入用戶名和密碼
拉取鏡像
docker pull 172.16.40.78:1180/k8s/kube-apiserver:v1.16.3
docker pull 172.16.40.78:1180/k8s/kube-controller-manager:v1.16.3
docker pull 172.16.40.78:1180/k8s/kube-scheduler:v1.16.3
docker pull 172.16.40.78:1180/k8s/kube-proxy:v1.16.3
docker pull 172.16.40.78:1180/k8s/pause:3.1
docker pull 172.16.40.78:1180/k8s/etcd:3.3.15-0
docker pull 172.16.40.78:1180/k8s/coredns:1.6.2
docker pull 172.16.40.78:1180/k8s/flannel:v0.11.0-amd64
鏡像重命名標簽
docker tag 172.16.40.78:1180/k8s/kube-apiserver:v1.16.3 k8s.gcr.io/kube-apiserver:v1.16.3
docker tag 172.16.40.78:1180/k8s/kube-controller-manager:v1.16.3 k8s.gcr.io/kube-controller-manager:v1.16.3
docker tag 172.16.40.78:1180/k8s/kube-scheduler:v1.16.3 k8s.gcr.io/kube-scheduler:v1.16.3
docker tag 172.16.40.78:1180/k8s/kube-proxy:v1.16.3 k8s.gcr.io/kube-proxy:v1.16.3
docker tag 172.16.40.78:1180/k8s/pause:3.1 k8s.gcr.io/pause:3.1
docker tag 172.16.40.78:1180/k8s/etcd:3.3.15-0 k8s.gcr.io/etcd:3.3.15-0
docker tag 172.16.40.78:1180/k8s/coredns:1.6.2 k8s.gcr.io/coredns:1.6.2
docker tag 172.16.40.78:1180/k8s/flannel:v0.11.0-amd64 quay.io/coreos/flannel:v0.11.0-amd64


初始化集群 (只需要在Master上執行)
kubeadm init --pod-network-cidr=10.244.0.0/16

記錄輸出末尾的 kubeadm join 命令(如下)。後續worker節點加入集群時,需在worker節點上執行該命令
kubeadm join 192.168.1.91:6443 --token mv96do.5j4o789pww3w9b8t \
--discovery-token-ca-cert-hash sha256:15844ec4b9ce3a5659b07fd48b5d11f604d3b31539860584f63bdc933f016bd0


設置KUBECONFIG (所有節點都要配置)
注意:worker節點需要從主節點copy /etc/kubernetes/admin.conf文件
scp /etc/kubernetes/admin.conf root@node01:/etc/kubernetes/
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=$HOME/.kube/config

配置Pod網絡插件flannel(只需要在Master上執行)
通過網上的flannel配置文件創建pod;如果創建失敗,需先刪除該配置,待網絡恢復後再重新創建。
創建命令
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml

注意:上述文件下載不動的話,參考這個地址https://www.cnblogs.com/runnerjack/p/12880583.html ,然后將內容保存成kube-flannel.yml文件即可。
刪除命令
kubectl delete -f https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml

執行命令 “kubectl get pods -n kube-system”查看coredns pod的狀態,直到STATUS變成Running
NAME READY STATUS RESTARTS AGE
coredns-5644d7b6d9-dx6qm 1/1 Running 0 31m
coredns-5644d7b6d9-w8th9 1/1 Running 0 31m
etcd-master 1/1 Running 0 30m
kube-apiserver-master 1/1 Running 0 30m
kube-controller-manager-master 1/1 Running 0 29m
kube-flannel-ds-amd64-bm9xl 1/1 Running 0 54s
kube-proxy-2k5nz 1/1 Running 0 31m
kube-scheduler-master 1/1 Running 0 30m

查看集群信息
# 查看節點信息
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 35m v1.16.3

# 查看集群信息
kubectl cluster-info
Kubernetes master is running at https://192.168.1.91:6443
KubeDNS is running at https://192.168.1.91:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

# 查看所有的 Pod 命令
kubectl get pods --all-namespaces


Node節點加入集群(僅在worker節點執行)
kubeadm join 192.168.1.91:6443 --token mv96do.5j4o789pww3w9b8t --discovery-token-ca-cert-hash sha256:15844ec4b9ce3a5659b07fd48b5d11f604d3b31539860584f63bdc933f016bd0

執行的提示信息:
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

節點加入成功后可以查看新的集群信息
# 查看節點信息
kubectl get nodes
# 查看集群信息
kubectl cluster-info

安裝dashboard(可選)
# 安裝dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

# 刪除dashboard
kubectl -n kube-system delete $(kubectl -n kube-system get pod -o name | grep dashboard)

 

 二、工作節點

設置主機名(以下兩條命令分別在對應的節點上執行)
hostnamectl set-hostname node02
hostnamectl set-hostname node03

修改hosts文件
vim /etc/hosts
192.168.1.91 master
192.168.1.92 node01
192.168.1.121 node02
192.168.1.122 node03

關閉防火牆
systemctl stop firewalld
systemctl disable firewalld

禁用SELINUX
setenforce 0
sed -i -re '/^\s*SELINUX=/s/^/#/' -e '$i\\SELINUX=disabled' /etc/selinux/config

關閉swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab

設置網橋netfilter內核參數(使流經網橋的流量經過iptables/ip6tables規則處理,k8s網絡必需;若sysctl報錯找不到該參數,先執行 modprobe br_netfilter)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system

安裝docker
安裝依賴
yum install yum-utils device-mapper-persistent-data lvm2 -y
下載repo文件
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
安裝docker
yum update && yum install docker-ce-18.06.2.ce

設置docker
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF

設置本地倉庫拉取鏡像
vi /usr/lib/systemd/system/docker.service
添加 --insecure-registry 172.16.40.78:1180
ExecStart=/usr/bin/dockerd --insecure-registry 172.16.40.78:1180
systemctl daemon-reload
systemctl restart docker
登錄docker倉庫
docker login 172.16.40.78:1180
輸入用戶名和密碼
拉取鏡像
docker pull 172.16.40.78:1180/k8s/kube-proxy:v1.16.3
docker pull 172.16.40.78:1180/k8s/pause:3.1
docker pull 172.16.40.78:1180/k8s/etcd:3.3.15-0
docker pull 172.16.40.78:1180/k8s/coredns:1.6.2
docker pull 172.16.40.78:1180/k8s/flannel:v0.11.0-amd64
鏡像重命名標簽
docker tag 172.16.40.78:1180/k8s/kube-proxy:v1.16.3 k8s.gcr.io/kube-proxy:v1.16.3
docker tag 172.16.40.78:1180/k8s/pause:3.1 k8s.gcr.io/pause:3.1
docker tag 172.16.40.78:1180/k8s/etcd:3.3.15-0 k8s.gcr.io/etcd:3.3.15-0
docker tag 172.16.40.78:1180/k8s/coredns:1.6.2 k8s.gcr.io/coredns:1.6.2
docker tag 172.16.40.78:1180/k8s/flannel:v0.11.0-amd64 quay.io/coreos/flannel:v0.11.0-amd64


安裝kubeadm, kubelet and kubectl(所有節點都要安裝)
修改k8s源
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.16.3 kubectl-1.16.3 kubeadm-1.16.3 --disableexcludes=kubernetes
systemctl enable --now kubelet

注意:worker節點需要從主節點copy /etc/kubernetes/admin.conf文件(在worker節點上執行以下命令,從master拉取;主機名需按實際情況替換)
scp root@master:/etc/kubernetes/admin.conf /etc/kubernetes/


Node節點加入集群(僅在worker節點執行)。若token已過期,可先在主節點執行 kubeadm token create 重新生成token
kubeadm join 192.168.1.91:6443 --token enwm6l.2hyqoedt4q4ztm18 --discovery-token-ca-cert-hash sha256:15844ec4b9ce3a5659b07fd48b5d11f604d3b31539860584f63bdc933f016bd0

如果添加失敗,添加參數-v=10 查看錯誤原因
kubeadm join 192.168.1.91:6443 --token enwm6l.2hyqoedt4q4ztm18 --discovery-token-ca-cert-hash sha256:15844ec4b9ce3a5659b07fd48b5d11f604d3b31539860584f63bdc933f016bd0 -v=10

刪除節點 重新添加
主節點執行 kubectl delete node 節點名
重置工作節點 kubeadm reset


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM