Deploying Kubernetes v1.18.6 with kubeadm


 

This document is kept as a working reference; the application-deployment steps in particular may contain errors or thin explanations, so please leave a comment to discuss.

Reference environment

Hostname  IP              CPU  Memory  Disk  Distribution     Platform
k8s1      172.16.186.132  2    4G      150G  CentOS 7.4.1708  VMware 15 Pro
k8s2      172.16.186.133  2    4G      150G  CentOS 7.4.1708  VMware 15 Pro
k8s3      172.16.186.134  2    4G      150G  CentOS 7.4.1708  VMware 15 Pro

Disable firewalld and SELinux (run on all three nodes)
sed -i "s/SELINUX=enforcing/SELINUX=disabled/" /etc/selinux/config
setenforce 0
systemctl stop firewalld && systemctl disable firewalld

Configure the hosts file
[root@k8s1 ~]# cat >> /etc/hosts << EOF    
172.16.186.132  k8s1
172.16.186.133  k8s2
172.16.186.134  k8s3
EOF
[root@k8s1 ~]# for i in {2..3};do scp /etc/hosts root@k8s$i:/etc;done

Configure passwordless SSH from the master to each node
[root@k8s1 ~]# ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
[root@k8s1 ~]# for i in {1..3};do ssh-copy-id k8s$i;done

Switch to the Aliyun yum repository (all three nodes)
[root@k8s1 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@k8s1 ~]# for i in {2..3};do scp /etc/yum.repos.d/CentOS-Base.repo k8s$i:/etc/yum.repos.d/;done

Mount the installation media (all three nodes)
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i mount /dev/sr0 /mnt/usb1;done

Install dependency packages (all three nodes)
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i yum -y install epel-release conntrack ipvsadm ipset jq sysstat curl iptables libseccomp;done

Configure iptables (all three nodes)
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i "iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT";done
# The quotes make the whole command chain run on the remote host rather than partly on the local one.

Disable swap (all three nodes)
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i swapoff -a;done
Note: swapoff only disables swap until the next reboot; to disable it permanently, delete or comment out the swap entry in /etc/fstab, for example as shown below.
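A common way to do that on all three nodes at once (a sketch; it comments every /etc/fstab line that mentions swap, so double-check the file afterwards):
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i "sed -ri 's/.*swap.*/#&/' /etc/fstab";done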

Load kernel modules (all three nodes)
[root@k8s1 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
modprobe -- br_netfilter
EOF
Explanation:
modprobe -- ip_vs               # LVS layer-4 load balancing
modprobe -- ip_vs_rr            # round-robin scheduling
modprobe -- ip_vs_wrr           # weighted round-robin scheduling
modprobe -- ip_vs_sh            # source-hashing scheduling
modprobe -- nf_conntrack_ipv4   # connection-tracking module
modprobe -- br_netfilter        # lets packets crossing the bridge be processed by iptables

[root@k8s1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
[root@k8s1 ~]# for i in {2..3};do scp /etc/sysconfig/modules/ipvs.modules k8s$i:/etc/sysconfig/modules/;done
[root@k8s1 ~]# for i in {2..3};do ssh k8s$i bash /etc/sysconfig/modules/ipvs.modules;done

Set kernel parameters (all three nodes)
[root@k8s1 ~]# cat>> /etc/sysctl.d/k8s.conf<<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

[root@k8s1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
[root@k8s1 ~]# for i in {2..3};do scp /etc/sysctl.d/k8s.conf k8s$i:/etc/sysctl.d/;done
[root@k8s1 ~]# for i in {2..3};do ssh k8s$i sysctl -p /etc/sysctl.d/k8s.conf;done
Explanation:
# overcommit_memory is the kernel's memory-overcommit policy; it takes one of three values: 0, 1, 2
- overcommit_memory=0   # the kernel checks whether enough free memory is available; if so the allocation succeeds, otherwise it fails and the error is returned to the process
- overcommit_memory=1   # the kernel allows every allocation regardless of the current memory state
- overcommit_memory=2   # the kernel refuses allocations that would exceed swap plus a configurable fraction (vm.overcommit_ratio) of physical memory, i.e. no overcommit
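To confirm the values actually took effect on every node, you can query a couple of them (optional check, not in the original):
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i "sysctl net.bridge.bridge-nf-call-iptables vm.overcommit_memory";done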

 

Deploy Docker
Install Docker prerequisites (all three nodes)
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i yum -y install yum-utils device-mapper-persistent-data lvm2;done
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo;done
Install Docker (all three nodes)
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i yum -y install docker-ce;done
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i systemctl enable docker;done
[root@k8s1 ~]# sed -i "13i ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT" /usr/lib/systemd/system/docker.service
[root@k8s2 ~]# sed -i "13i ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT" /usr/lib/systemd/system/docker.service
[root@k8s3 ~]# sed -i "13i ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT" /usr/lib/systemd/system/docker.service
Purpose: add this post-start command after installation; otherwise Docker resets the default policy of the iptables FORWARD chain to DROP.
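A quick way to confirm the FORWARD policy is ACCEPT (optional check; the policy was also set explicitly in the iptables step earlier):
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i "iptables -S FORWARD | head -1";done
# expected output on each node: -P FORWARD ACCEPT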

[root@k8s1 ~]# for i in {1..3};do ssh k8s$i mkdir /etc/docker;done
[root@k8s1 ~]# tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://bk6kzfqm.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
Explanation (verified after the restart below):
"registry-mirrors": ["https://bk6kzfqm.mirror.aliyuncs.com"],   # Aliyun registry mirror for faster image pulls
"exec-opts": ["native.cgroupdriver=systemd"],    # use systemd as the cgroup driver

[root@k8s1 ~]# for i in {2..3};do scp /etc/docker/daemon.json k8s$i:/etc/docker;done
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i systemctl daemon-reload;done
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i systemctl restart docker;done
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i systemctl status docker|grep "running";done
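To verify that the daemon.json settings above are active after the restart (optional check), look at the cgroup driver and registry mirror reported by docker info:
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i 'docker info 2>/dev/null | grep -iE "cgroup driver|aliyuncs"';done
# expected: Cgroup Driver: systemd, plus the Aliyun mirror URL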

Deploy kubeadm and kubelet
Configure the yum repository (all three nodes)
[root@k8s1 ~]# cat >>/etc/yum.repos.d/kubernetes.repo<<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

[root@k8s1 ~]# for i in {2..3};do scp /etc/yum.repos.d/kubernetes.repo k8s$i:/etc/yum.repos.d/;done
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i yum install -y kubelet kubeadm kubectl;done
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i systemctl enable kubelet;done
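The repository installs the latest available packages; since this guide targets v1.18.6, you may prefer to pin the version explicitly instead (optional, assuming the Aliyun repo still carries these packages):
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i yum install -y kubelet-1.18.6 kubeadm-1.18.6 kubectl-1.18.6;done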

Configure shell completion (all three nodes)
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i yum -y install bash-completion;done
Generate kubectl and kubeadm completion scripts; they take effect at the next login.
[root@k8s1 ~]# kubectl completion bash > /etc/bash_completion.d/kubectl
[root@k8s1 ~]# kubeadm completion bash > /etc/bash_completion.d/kubeadm
[root@k8s2 ~]# kubectl completion bash > /etc/bash_completion.d/kubectl
[root@k8s2 ~]# kubeadm completion bash > /etc/bash_completion.d/kubeadm
[root@k8s3 ~]# kubectl completion bash > /etc/bash_completion.d/kubectl
[root@k8s3 ~]# kubeadm completion bash > /etc/bash_completion.d/kubeadm
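To use completion in the current shell without logging out again (optional):
[root@k8s1 ~]# source <(kubectl completion bash)
[root@k8s1 ~]# source <(kubeadm completion bash)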

List the images this Kubernetes version requires (all three nodes)
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i kubeadm config images list --kubernetes-version v1.18.6;done

Pull the required images (all three nodes)
[root@k8s1 ~]# vim get-k8s-images.sh
#!/bin/bash
# Script For Quick Pull K8S Docker Images

KUBE_VERSION=v1.18.6
PAUSE_VERSION=3.2
CORE_DNS_VERSION=1.6.7
ETCD_VERSION=3.4.3-0

# pull kubernetes images from hub.docker.com
docker pull kubeimage/kube-proxy-amd64:$KUBE_VERSION
docker pull kubeimage/kube-controller-manager-amd64:$KUBE_VERSION
docker pull kubeimage/kube-apiserver-amd64:$KUBE_VERSION
docker pull kubeimage/kube-scheduler-amd64:$KUBE_VERSION
# pull aliyuncs mirror docker images
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:$PAUSE_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$CORE_DNS_VERSION
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:$ETCD_VERSION

# retag to k8s.gcr.io prefix
docker tag kubeimage/kube-proxy-amd64:$KUBE_VERSION  k8s.gcr.io/kube-proxy:$KUBE_VERSION
docker tag kubeimage/kube-controller-manager-amd64:$KUBE_VERSION k8s.gcr.io/kube-controller-manager:$KUBE_VERSION
docker tag kubeimage/kube-apiserver-amd64:$KUBE_VERSION k8s.gcr.io/kube-apiserver:$KUBE_VERSION
docker tag kubeimage/kube-scheduler-amd64:$KUBE_VERSION k8s.gcr.io/kube-scheduler:$KUBE_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:$PAUSE_VERSION k8s.gcr.io/pause:$PAUSE_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$CORE_DNS_VERSION k8s.gcr.io/coredns:$CORE_DNS_VERSION
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:$ETCD_VERSION k8s.gcr.io/etcd:$ETCD_VERSION

# remove the original tags; the underlying images are not deleted.
docker rmi kubeimage/kube-proxy-amd64:$KUBE_VERSION
docker rmi kubeimage/kube-controller-manager-amd64:$KUBE_VERSION
docker rmi kubeimage/kube-apiserver-amd64:$KUBE_VERSION
docker rmi kubeimage/kube-scheduler-amd64:$KUBE_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:$PAUSE_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:$CORE_DNS_VERSION
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:$ETCD_VERSION

[root@k8s1 ~]# for i in {2..3};do scp get-k8s-images.sh k8s$i:~;done
[root@k8s1 ~]# sh get-k8s-images.sh
[root@k8s2 ~]# sh get-k8s-images.sh
[root@k8s3 ~]# sh get-k8s-images.sh 

 

Initialize the cluster
# Initialize the cluster with kubeadm init; the IP is this host's address (run on the master, k8s1)
[root@k8s1 ~]# kubeadm init --kubernetes-version=v1.18.6 --apiserver-advertise-address=172.16.186.132 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.1.0.0/16
Explanation:
--kubernetes-version=v1.18.6 : use the images of exactly this version (the ones pulled above)
--pod-network-cidr=10.244.0.0/16 : Pod network range; flannel is used for Pod-to-Pod traffic and expects 10.244.0.0/16
--service-cidr=10.1.0.0/16 : Service network range

After initialization succeeds, output like the following is printed.

Note: be sure to record the join command printed at the end, e.g.
kubeadm join 172.16.186.132:6443 --token rcdskf.v78ucocy4u3isan0 \
    --discovery-token-ca-cert-hash sha256:525162ce0fa511bd437a20e577f109394ad140ecfb632bdc96281f89c5f10f67

 

Configure kubectl for any user that needs it (on the master)
[root@k8s1 ~]# mkdir -p $HOME/.kube
[root@k8s1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config

Use the command below to make sure all Pods reach the Running state. This can take a while (over an hour in my case); you can install the flannel network first (next step) and check again afterwards.
[root@k8s1 ~]# kubectl get pod --all-namespaces -o wide
The following command shows the same information:
[root@k8s1 ~]# kubectl get pods -n kube-system
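To follow the status changes instead of re-running the command, kubectl can watch the namespace (optional):
[root@k8s1 ~]# kubectl get pod -n kube-system -w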

Cluster network configuration (a Pod network add-on must be installed for the cluster to work, otherwise Pods cannot communicate; flannel is used here)
[root@k8s1 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Note: check that the address and image can actually be reached. If the command above fails with "The connection to the server raw.githubusercontent.com was refused - did you specify the right host or port?", look up a real IP for raw.githubusercontent.com:
https://site.ip138.com/raw.Githubusercontent.com/
enter raw.githubusercontent.com on that page
one of the results is a Hong Kong node that works
then add the address to /etc/hosts:
vim /etc/hosts
151.101.76.133 raw.githubusercontent.com

 

Add worker nodes to the Kubernetes cluster
[root@k8s2 ~]# kubeadm join 172.16.186.132:6443 --token rcdskf.v78ucocy4u3isan0 \
    --discovery-token-ca-cert-hash sha256:525162ce0fa511bd437a20e577f109394ad140ecfb632bdc96281f89c5f10f67
[root@k8s3 ~]# kubeadm join 172.16.186.132:6443 --token rcdskf.v78ucocy4u3isan0 \
    --discovery-token-ca-cert-hash sha256:525162ce0fa511bd437a20e577f109394ad140ecfb632bdc96281f89c5f10f67

========================================================
Note: if you did not record the cluster join command, you can regenerate it with:
kubeadm token create --print-join-command --ttl=0
========================================================

Note: the join output suggests running kubectl get nodes to confirm the node has joined the cluster, but that command does not work on the worker nodes yet.
Fix: copy admin.conf from k8s1 to the other nodes and rename it to config:

[root@k8s1 ~]# for i in {2..3};do ssh k8s$i mkdir -p $HOME/.kube;done
[root@k8s1 ~]# for i in {2..3};do scp /etc/kubernetes/admin.conf  k8s$i:/root/.kube/config;done
[root@k8s1 ~]# for i in {1..3};do ssh k8s$i kubectl get nodes;done

[root@k8s1 ~]# kubectl get pod --all-namespaces -o wide
Note: all Pods below should show Running; if some are erroring, wait a while and check again (about 1.5 hours here).

Troubleshooting:
If one node's Pod stays in CrashLoopBackOff, restarting all containers on the master node (docker restart `docker ps -aq`) is worth trying.
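Before restarting everything, it is usually worth looking at why the Pod keeps crashing (replace <podName> with the failing Pod's name):
[root@k8s1 ~]# kubectl -n kube-system describe pod <podName>
[root@k8s1 ~]# kubectl -n kube-system logs <podName> --previous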


Enable IPVS mode for kube-proxy (on the master)
[root@k8s1 ~]# kubectl get configmap kube-proxy -n kube-system -o yaml > kube-proxy-configmap.yaml
[root@k8s1 ~]# sed -i 's/mode: ""/mode: "ipvs"/' kube-proxy-configmap.yaml
[root@k8s1 ~]# kubectl apply -f kube-proxy-configmap.yaml
[root@k8s1 ~]# rm -f kube-proxy-configmap.yaml
[root@k8s1 ~]# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
[root@k8s1 ~]# kubectl get pods -n kube-system
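To confirm kube-proxy is really running in IPVS mode (optional check; ipvsadm was installed in the dependency step):
[root@k8s1 ~]# kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipvs
[root@k8s1 ~]# ipvsadm -Ln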

 

Deploy kubernetes-dashboard

Download and modify the Dashboard manifest (run on the master)
Following the official installation notes, run on the master:
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta5/aio/deploy/recommended.yaml
## If the download fails, copy the file below instead
[root@k8s1 ~]# cat > recommended.yaml<<-EOF
apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-beta1
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: kubernetes-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-metrics-scraper
  name: kubernetes-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: kubernetes-metrics-scraper
    spec:
      containers:
        - name: kubernetes-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.0
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
EOF

 

Modify recommended.yaml

Comment out all of the sections shown in the screenshot below.
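The screenshot is not reproduced here. Note that the Service in the manifest above is a plain ClusterIP Service, while the browser access later in this document uses port 30008, so presumably (this is an assumption, not stated in the original) the modification also exposes the dashboard as a NodePort, roughly like this:
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30008
  selector:
    k8s-app: kubernetes-dashboard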

 

Create certificates

[root@k8s1 ~]# mkdir dashboard-certs && cd dashboard-certs/
# Create the namespace
[root@k8s1 dashboard-certs]# kubectl create namespace kubernetes-dashboard
## to delete it again: kubectl delete namespace kubernetes-dashboard
# Generate the private key
[root@k8s1 dashboard-certs]# openssl genrsa -out dashboard.key 2048
# Generate a certificate signing request
[root@k8s1 dashboard-certs]# openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'
# Self-sign the certificate
[root@k8s1 dashboard-certs]# openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
# Create the kubernetes-dashboard-certs secret object
[root@k8s1 dashboard-certs]# kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard

[root@k8s1 dashboard-certs]# cd

Create the dashboard admin account
(1) Create the ServiceAccount
[root@k8s1 ~]# vim dashboard-admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard

# Save and exit, then apply:
[root@k8s1 ~]# kubectl create -f dashboard-admin.yaml

 

Grant the account cluster permissions
[root@k8s1 ~]# vim dashboard-admin-bind-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-bind-cluster-role
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kubernetes-dashboard

 

# Save and exit, then apply:
[root@k8s1 ~]# kubectl create -f dashboard-admin-bind-cluster-role.yaml

 

Install the Dashboard

[root@k8s1 ~]# kubectl create -f  ~/recommended.yaml
Note: this prints a warning (shown in the screenshot below); it can be ignored.

 

# Check the result
[root@k8s1 ~]# kubectl get pods -A -o wide      # or, for the dashboard only: kubectl get pods -A -o wide | grep dashboard

 

View and copy the user token
[root@k8s1 ~]# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin | awk '{print $1}')

Note: save the long string after "token:" on the last line of the output; it is the password for the web login.

 

Access from a browser

https://172.16.186.132:30008/ 

Note: on the certificate warning page choose "Advanced" → "Accept the Risk and Continue".

Select "Token", paste the token obtained above into the Token field, then click "Sign in".

 

 

==============================================
# If the site reports a certificate error, delete step by step back to the certificate-creation stage and redo it
kubectl delete -f ~/recommended.yaml    # then recreate the certificates and re-apply the manifest
==============================================

 

 

 

 

 

Typical workflow for migrating a project onto Kubernetes

1. Build Docker images with a Dockerfile
2. Manage Pods with controllers
    Deployment: stateless workloads, e.g. web apps, microservices, APIs
    StatefulSet: stateful workloads, e.g. databases, ZooKeeper, etcd
    DaemonSet: per-node daemons, e.g. monitoring agents, log agents
    Job & CronJob: batch tasks, e.g. database backups, mail notifications
3. Persist Pod data
Containers generally deal with three kinds of data:
    initial data needed at startup, such as configuration files
    temporary data produced at runtime that must be shared between containers
    business data produced at runtime

 

 

4. Expose the application:
Expose it inside the cluster with a ClusterIP Service.
    A Service defines a logical set of Pods and the policy for accessing them
    Services exist to cope with the dynamic nature of Pods, providing service discovery and load balancing
    Service names are resolved by CoreDNS

 


Publish the application externally:
Expose it outside the cluster with an Ingress (a minimal sketch follows this list).
    Pods are reached through the associated Service
    Access is based on domain names
    An Ingress controller load-balances across the Pods
    Supports layer-4 TCP/UDP and layer-7 HTTP (Nginx)
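A minimal Ingress sketch for illustration (not part of this deployment; it assumes an Ingress controller such as ingress-nginx is already installed, and it points at the myweb Service created later in this document; the host name is hypothetical):
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: myweb-ingress
spec:
  rules:
  - host: myweb.example.com          # hypothetical domain
    http:
      paths:
      - path: /
        backend:
          serviceName: myweb         # Service defined later in this document
          servicePort: 8080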

 

Logging and monitoring
Use Prometheus to monitor the state of cluster resources
Use ELK to collect application logs

 

 

Application deployment

Create the MySQL Deployment manifest
[root@k8s1 ~]# cat>>mysql-dep.yaml<<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - name: mysql
        image: mysql:5.7
        volumeMounts:
        - name: time-zone
          mountPath: /etc/localtime
        - name: mysql-data
          mountPath: /var/lib/mysql
        - name: mysql-logs
          mountPath: /var/log/mysql
        ports:
        - containerPort: 3306
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "123456"
      volumes:
      - name: time-zone
        hostPath:
          path: /etc/localtime
      - name: mysql-data
        hostPath:
          path: /data/mysql/data
      - name: mysql-logs
        hostPath:
          path: /data/mysql/logs
EOF


Deploy MySQL
[root@k8s1 ~]# kubectl apply -f mysql-dep.yaml
Check the Deployment and ReplicaSet information
[root@k8s1 ~]# kubectl get deployment
[root@k8s1 ~]# kubectl get rs

Define a Service manifest for MySQL
[root@k8s1 ~]# vim mysql-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
    - port: 3306
  selector:
    app: mysql

Apply the MySQL Service manifest to the cluster
[root@k8s1 ~]# kubectl create -f mysql-svc.yaml
Check it:
[root@k8s1 ~]# kubectl get services    # or: kubectl get svc

Start the Tomcat application
Create the Tomcat Deployment manifest
[root@k8s1 ~]# vim myweb-dep.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myweb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myweb
  strategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: myweb
    spec:
      containers:
      - name: myweb
        image: kubeguide/tomcat-app:v1
        volumeMounts:
        - name: time-zone
          mountPath: /etc/localtime
        - name: tomcat-logs
          mountPath: /usr/local/tomcat/logs
        ports:
        - containerPort: 8080
        env:
        - name: MYSQL_SERVICE_HOST
          value: '10.1.126.255'               # ClusterIP of the mysql Service; replace it with the value shown by kubectl get svc mysql
        - name: MYSQL_SERVICE_PORT
          value: '3306'
      volumes:
      - name: time-zone
        hostPath:
          path: /etc/localtime
      - name: tomcat-logs
        hostPath:
          path: /data/tomcat/logs
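Since CoreDNS resolves Service names inside the cluster (see the migration notes above), an alternative to hard-coding the ClusterIP is to point MYSQL_SERVICE_HOST at the Service name itself; a sketch, assuming the mysql Service lives in the same namespace:
        - name: MYSQL_SERVICE_HOST
          value: 'mysql'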

Deploy the Tomcat Deployment
[root@k8s1 ~]# kubectl create -f myweb-dep.yaml
[root@k8s1 ~]# kubectl get svc

Create the Tomcat Service manifest
[root@k8s1 ~]# vim myweb-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort
  ports:
    - port: 8080
      nodePort: 30001
  selector:
    app: myweb

Apply the Tomcat Service manifest to the cluster
[root@k8s1 ~]# kubectl create -f myweb-svc.yaml

Check the Tomcat Service
[root@k8s1 ~]# kubectl get svc

Access from a browser

 

Note: if port 30001 is unreachable, restart firewalld and then stop it again:
systemctl restart firewalld && systemctl stop firewalld
Reason: Kubernetes adds its own rules to iptables; the firewall has to be started and stopped once more before those blocking rules are cleared.

Open http://172.16.186.132:30001/demo/ in a browser

Click "Add...", fill in a record, and click Submit.

 

After clicking "return" and refreshing the page, the newly added "rambo" record appears.

Check inside the container's database whether the new record is there
1. List all running Pods
[root@k8s1 ~]# kubectl get pods -o wide
The output shows the mysql-related Pods running on nodes 2 and 3.

Note: one Pod may be intermittently abnormal; it recovers after a while.

On nodes 2 and 3, inspect the running containers; queries for the newly inserted data are served by these two nodes in turn (round-robin).

Note: the second mysql-related container listed is the Pod's network (pause) container.
Enter the container

Method 1:

Node 2 does not contain the newly inserted record.

Now check whether node k8s3 has the record (same method as on node 2); enter the second-to-last container.

The newly inserted "rambo" record is on node 3, not on node 2.

 

Enter a container with kubectl
Syntax: kubectl exec -it <podName> -c <containerName> -n <namespace> -- <shell command>
When the Pod has only one container:
[root@k8s2 ~]# kubectl exec -it <podName> -- /bin/bash
When the Pod has more than one container, select it with --container (or -c):
[root@k8s1 ~]# kubectl exec -it <podName> --container <container> -- /bin/bash
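For example, to open a MySQL shell inside the mysql Pod deployed earlier (a sketch; the label app=mysql and the root password 123456 come from mysql-dep.yaml above):
[root@k8s1 ~]# MYSQL_POD=$(kubectl get pod -l app=mysql -o jsonpath='{.items[0].metadata.name}')
[root@k8s1 ~]# kubectl exec -it $MYSQL_POD -- mysql -uroot -p123456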

 

 

This document is kept as a working reference; the application-deployment part (as opposed to the installation itself) may contain errors or thin explanations, so please leave a comment to discuss. Thanks!

 

 
