k8s environment:
[root@k8s-master ~]# kubectl get node -o wide
NAME         STATUS   ROLES    AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION              CONTAINER-RUNTIME
k8s-master   Ready    master   91d   v1.18.8   192.168.1.230   <none>        CentOS Linux 7 (Core)   5.8.2-1.el7.elrepo.x86_64   docker://19.3.12
k8s-node01   Ready    <none>   91d   v1.18.8   192.168.1.231   <none>        CentOS Linux 7 (Core)   5.8.1-1.el7.elrepo.x86_64   docker://19.3.12
k8s-node02   Ready    <none>   91d   v1.18.8   192.168.1.232   <none>        CentOS Linux 7 (Core)   5.8.1-1.el7.elrepo.x86_64   docker://19.3.12
Note: local volumes do not support dynamic provisioning yet. We still create a StorageClass, though, in order to delay volume binding until Pod scheduling has completed; that is what the WaitForFirstConsumer volume binding mode specifies.
1. Create the SC
vi bxy-local-StorageClass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: bxy-local-sc-volume
provisioner: kubernetes.io/no-provisioner   # volume plugin type
# reclaimPolicy: Retain                     # reclaim policy, defaults to Delete
volumeBindingMode: WaitForFirstConsumer     # PV creation and PVC/PV binding are triggered only once a Pod actually uses the PVC
[root@k8s-node02 test]# kubectl apply -f bxy-local-StorageClass.yaml
storageclass.storage.k8s.io/bxy-local-sc-volume created
[root@k8s-node02 test]# kubectl get sc
NAME                  PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
bxy-local-sc-volume   kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   false                  8s
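As a quick sanity check, the binding mode can be read straight off the object with a jsonpath query (a minimal sketch; only the exact output below is assumed):

[root@k8s-node02 test]# kubectl get sc bxy-local-sc-volume -o jsonpath='{.volumeBindingMode}'
WaitForFirstConsumer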
2. Create the PV
vi bxy-local-PersistentVolume.yaml

apiVersion: v1
kind: PersistentVolume
metadata:
  name: bxy-local-pv-volume
  labels:
    name: bxy-local-pv-labels
spec:
  capacity:                                # capacity
    storage: 5Gi
  volumeMode: Filesystem                   # default
  accessModes:                             # access modes: RWO = single-node read/write, RWX = multi-node read/write, ROX = multi-node read-only
    - ReadWriteOnce                        # local volume: RWO
  persistentVolumeReclaimPolicy: Delete    # reclaim policy
  storageClassName: bxy-local-sc-volume
  local:                                   # local mount path
    path: /opt/test/bxy/nginx
  nodeAffinity:                            # node affinity; match the node whose kubernetes.io/hostname label is k8s-node01
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - k8s-node01
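One caveat before moving on: a local PV does not create its backing directory for you, so the path must already exist on the target node before a Pod tries to mount it, or the mount will fail. A minimal preparation step, run on k8s-node01 (the node matched by the nodeAffinity above):

[root@k8s-node01 ~]# mkdir -p /opt/test/bxy/nginx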
# query the node labels
[root@k8s-node02 test]# kubectl get node -o wide --show-labels
NAME         STATUS   ROLES    AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION              CONTAINER-RUNTIME   LABELS
k8s-master   Ready    master   91d   v1.18.8   192.168.1.230   <none>        CentOS Linux 7 (Core)   5.8.2-1.el7.elrepo.x86_64   docker://19.3.12    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-master,kubernetes.io/os=linux,node-role.kubernetes.io/master=
k8s-node01   Ready    <none>   91d   v1.18.8   192.168.1.231   <none>        CentOS Linux 7 (Core)   5.8.1-1.el7.elrepo.x86_64   docker://19.3.12    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node01,kubernetes.io/os=linux
k8s-node02   Ready    <none>   91d   v1.18.8   192.168.1.232   <none>        CentOS Linux 7 (Core)   5.8.1-1.el7.elrepo.x86_64   docker://19.3.12    beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-node02,kubernetes.io/os=linux
# apply & check the PV status
[root@k8s-node02 test]# kubectl apply -f bxy-local-PersistentVolume.yaml
persistentvolume/bxy-local-pv-volume created
[root@k8s-node02 test]# kubectl get pv
NAME                  CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS          REASON   AGE
bxy-local-pv-volume   5Gi        RWO            Delete           Available           bxy-local-sc-volume            4s
# the STATUS field shows Available
3. Create the PVC
vi bxy-local-PersistentVolumeClaim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: bxy-local-pvc-volume
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem                   # this is a filesystem volume
  resources:
    requests:
      storage: 5Gi
  storageClassName: bxy-local-sc-volume
  selector:                                # this selector matches the PV's labels
    matchLabels:
      name: bxy-local-pv-labels
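Because the claim carries a label selector, it can only ever bind to PVs with that label. As a hedged preview of the candidate set (kubectl supports label selection on PVs like on any other resource), this should list exactly the PV created in step 2:

[root@k8s-node02 test]# kubectl get pv -l name=bxy-local-pv-labels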
# apply & check status
[root@k8s-node02 test]# kubectl apply -f bxy-local-PersistentVolumeClaim.yaml
persistentvolumeclaim/bxy-local-pvc-volume created
[root@k8s-node02 test]# kubectl get pvc
NAME                   STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS          AGE
bxy-local-pvc-volume   Pending                                      bxy-local-sc-volume   12s
[root@k8s-node02 test]# kubectl describe pvc bxy-local-pvc-volume
Name:          bxy-local-pvc-volume
Namespace:     default
StorageClass:  bxy-local-sc-volume
Status:        Pending
Volume:
Labels:        <none>
Annotations:
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode:    Filesystem
Mounted By:    <none>
Events:
  Type    Reason                Age                From                         Message
  ----    ------                ----               ----                         -------
  Normal  WaitForFirstConsumer  11s (x6 over 79s)  persistentvolume-controller  waiting for first consumer to be created before binding
# Because the SC specifies volumeBindingMode: WaitForFirstConsumer, binding only happens once a consumer actually uses the PVC.
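If you want to watch that wait happen, the controller events can be filtered down to this one PVC (a sketch; --field-selector on events is standard kubectl):

[root@k8s-node02 test]# kubectl get events --field-selector involvedObject.name=bxy-local-pvc-volume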
4. Create the NG Deployment
vi bxy-local-nginx-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: bxy-local-nginx-deploy
spec:
  replicas: 2                              # number of replicas
  selector:
    matchLabels:                           # spec.selector.matchLabels MUST match spec.template.metadata.labels, or the Deployment will not start
      k8s-app: bxy-local-nginx-deploy-labels
  template:
    metadata:
      labels:                              # labels carried by the Pod replicas, matched by the selector above
        k8s-app: bxy-local-nginx-deploy-labels
    spec:
      containers:
        - name: bxy-local-nginx
          image: nginx
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: "/usr/share/nginx/html"
              name: bxy-local-pv-volume    # must equal spec.template.spec.volumes[].name below
      volumes:
        - name: bxy-local-pv-volume        # volume name (here it happens to reuse the PV's metadata.name)
          persistentVolumeClaim:
            claimName: bxy-local-pvc-volume
Apply & status
[root@k8s-node02 test]# kubectl apply -f bxy-local-nginx-deploy.yaml
deployment.apps/bxy-local-nginx-deploy created
[root@k8s-node02 test]# kubectl get deploy
NAME                     READY   UP-TO-DATE   AVAILABLE   AGE
bxy-local-nginx-deploy   1/1     1            1           5s
nfs-client-provisioner   1/1     1            1           5h19m
tomcat                   1/1     1            1           90d
[root@k8s-master ~]# kubectl get pod -o wide
NAME                                      READY   STATUS    RESTARTS   AGE     IP             NODE         NOMINATED NODE   READINESS GATES
bxy-local-nginx-deploy-59d9f57449-2lbrt   1/1     Running   0          106s    10.244.1.84    k8s-node01   <none>           <none>
bxy-local-nginx-deploy-59d9f57449-xbsmj   1/1     Running   0          105s    10.244.1.83    k8s-node01   <none>           <none>
nfs-client-provisioner-6ffd9d54c5-zzkz7   1/1     Running   0          5h22m   10.244.1.76    k8s-node01   <none>           <none>
tomcat-cb9688cd5-xnwqb                    1/1     Running   17         90d     10.244.2.119   k8s-node02   <none>           <none>
Note that I wrote and applied the NG deploy file on k8s-node02.
Yet both NG replicas were scheduled onto k8s-node01, and if we increase the replica count later, new Pods will still land on k8s-node01. This is caused by the nodeAffinity field we set in the PV.
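To see that pinning in action, scale the Deployment up and check where the new replica lands; every Pod should show k8s-node01 in the NODE column (a sketch; the replica count 3 is arbitrary):

[root@k8s-master ~]# kubectl scale deployment bxy-local-nginx-deploy --replicas=3
[root@k8s-master ~]# kubectl get pod -o wide | grep bxy-local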
# PV status before the NG replicas started
[root@k8s-node02 test]# kubectl get pv
NAME                  CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS          REASON   AGE
bxy-local-pv-volume   5Gi        RWO            Delete           Available           bxy-local-sc-volume            4s
# the STATUS field was Available
# PVC status before the NG replicas started
[root@k8s-node02 test]# kubectl get pvc
NAME                   STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS          AGE
bxy-local-pvc-volume   Pending                                      bxy-local-sc-volume   12s
# checking the PV & PVC again, the STATUS field is now Bound
[root@k8s-master ~]# kubectl get pv
NAME                  CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                          STORAGECLASS          REASON   AGE
bxy-local-pv-volume   5Gi        RWO            Delete           Bound    default/bxy-local-pvc-volume   bxy-local-sc-volume            32m
[root@k8s-master ~]# kubectl get pvc
NAME                   STATUS   VOLUME                CAPACITY   ACCESS MODES   STORAGECLASS          AGE
bxy-local-pvc-volume   Bound    bxy-local-pv-volume   5Gi        RWO            bxy-local-sc-volume   26m
5. Configure the Service
vi bxy-local-nginx-Service.yaml

apiVersion: v1
kind: Service
metadata:
  name: bxy-local-nginx-sc
spec:
  clusterIP: 10.101.138.36                 # I use the flannel network plugin; any address inside the cluster IP range works here
  externalTrafficPolicy: Cluster           # Cluster or Local (Local does not forward traffic to other nodes)
  ports:
    - nodePort: 29605                      # external access port
      port: 19605                          # in-cluster access port
      targetPort: 80                       # container port
      protocol: TCP
  selector:                                # matches the labels field in the Deployment
    k8s-app: bxy-local-nginx-deploy-labels
  type: LoadBalancer
Apply & status
[root@k8s-node01 test]# kubectl apply -f bxy-local-nginx-Service.yaml
service/bxy-local-nginx-sc created
[root@k8s-node01 test]# kubectl get svc
NAME                 TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)           AGE
bxy-local-nginx-sc   LoadBalancer   10.101.138.36   <pending>     19605:29605/TCP   7s
kubernetes           ClusterIP      10.96.0.1       <none>        443/TCP           91d
nginx-nfs            LoadBalancer   10.101.138.35   <pending>     19606:29606/TCP   2d3h
tomcat               LoadBalancer   10.100.191.78   <pending>     19999:20000/TCP   90d
EXTERNAL-IP stays <pending> because this cluster has no external load-balancer provider; the service is still reachable through its nodePort.
6. Test
[root@k8s-master ~]# curl 10.101.138.36:19605
<html>
<head><title>403 Forbidden</title></head>
<body>
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.19.4</center>
</body>
</html>
I hit NG via the cluster-internal IP; it returns 403 because I have not put any files into the local mount directory yet.
At this point PV + PVC + SC + local has effectively been proven to work: the NG image ships with its own index.html inside the container,
but since we get a 403, the default page must have been shadowed by the empty local directory mounted over it.
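We can confirm that from inside a running replica; the mounted directory should list as empty (a sketch reusing one of the Pod names from the listing above; yours will differ):

[root@k8s-master ~]# kubectl exec bxy-local-nginx-deploy-59d9f57449-2lbrt -- ls /usr/share/nginx/html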
Let's add a new file and try again:
[root@k8s-node01 nginx]# pwd
/opt/test/bxy/nginx
[root@k8s-node01 nginx]# ls
[root@k8s-node01 nginx]# echo 'k8s local mount test !!!' > index.html
[root@k8s-node01 nginx]# ls
index.html
[root@k8s-node01 nginx]# curl 10.101.138.36:19605     # access via the cluster-internal IP
k8s local mount test !!!
[root@k8s-node01 nginx]# curl 192.168.1.230:29605     # external access via the k8s-master IP
k8s local mount test !!!
[root@k8s-node01 nginx]# curl 192.168.1.231:29605     # external access via the k8s-node01 IP
k8s local mount test !!!
[root@k8s-node01 nginx]# curl 192.168.1.232:29605     # external access via the k8s-node02 IP
k8s local mount test !!!
As for why the external IPs of nodes other than the one actually running the Pods can also serve NG: that is because we set externalTrafficPolicy: Cluster in the Service, so traffic arriving at any node is forwarded on to a node with a backing Pod.
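If you flip the policy to Local, only the node actually hosting a replica (k8s-node01 here) should still answer on its nodePort, while curls against the other nodes' 29605 would fail. A hedged one-liner to try it:

[root@k8s-master ~]# kubectl patch svc bxy-local-nginx-sc -p '{"spec":{"externalTrafficPolicy":"Local"}}'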
With that, the PV + PVC + SC + local persistent volume mount is complete.
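Finally, a teardown sketch using the same manifests, deleting in reverse order of creation. Note that with a Delete reclaim policy and no provisioner, the released PV cannot actually be auto-deleted, so expect to clean it (and the directory on k8s-node01) up by hand:

kubectl delete -f bxy-local-nginx-Service.yaml
kubectl delete -f bxy-local-nginx-deploy.yaml
kubectl delete -f bxy-local-PersistentVolumeClaim.yaml
kubectl delete -f bxy-local-PersistentVolume.yaml
kubectl delete -f bxy-local-StorageClass.yaml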