Kubernetes: dynamic persistent volume storage with Ceph


Using Ceph storage with Kubernetes

Ceph provides the underlying storage. CephFS supports all three Kubernetes PV access modes (ReadWriteOnce, ReadOnlyMany, ReadWriteMany), while RBD supports only ReadWriteOnce and ReadOnlyMany.

Dynamic provisioning creates PVs for you automatically, sized to whatever capacity the claim requests. Kubernetes creates the PV on demand: when a PVC is created, the API goes through the referenced StorageClass to provision a matching PV.

With static provisioning, PVs have to be created by hand in advance; if no suitable PV with enough capacity exists, the Pod stays in the Pending state. Dynamic provisioning is implemented through the StorageClass object: it declares which storage backend to use, connects to it on your behalf, and creates PVs automatically.
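
As a minimal sketch of that relationship (the class name fast-rbd and claim name my-claim are made up purely for illustration, and the real parameters a Ceph RBD class needs are shown later in this article), a claim only has to name a StorageClass; the provisioner declared in that class then creates and binds the PV:

kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: fast-rbd                  # illustrative name
provisioner: ceph.com/rbd         # external provisioner deployed later in this article
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: my-claim                  # illustrative name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: fast-rbd      # points the claim at the class above
  resources:
    requests:
      storage: 1Gi                # the provisioner creates a PV of this size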

 

Using Ceph RBD as a persistent data volume

Configure the rbd-provisioner

1. Write the YAML manifest

[root@k8s-master ~]# cat >external-storage-rbd-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
- kind: ServiceAccount
  name: rbd-provisioner
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: rbd-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
      - name: rbd-provisioner
        image: "registry.cn-chengdu.aliyuncs.com/ives/rbd-provisioner:v2.0.0-k8s1.11"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/rbd
      serviceAccount: rbd-provisioner
EOF

2. Create the resources

[root@k8s-master ~]# kubectl apply -f external-storage-rbd-provisioner.yaml

[root@k8s-master ~]# kubectl get pods -n kube-system  |grep rbd
rbd-provisioner-7c77dcfd67-9xv2m     1/1     Running   0          59s
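
If the Pod does not reach the Running state, check its logs first; these are standard kubectl commands, nothing specific to this setup:

[root@k8s-master ~]# kubectl -n kube-system logs deploy/rbd-provisioner
[root@k8s-master ~]# kubectl -n kube-system describe pod -l app=rbd-provisioner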

 

Configure the StorageClass

When a Pod is created, the kubelet uses the rbd command to map and mount the Ceph image backing the PV, so the Ceph client package ceph-common must be installed on every Kubernetes worker node. Also copy ceph.client.admin.keyring and ceph.conf from the Ceph cluster into /etc/ceph on the master (an scp sketch is shown after the install step below).

1. Install ceph-common (on all Kubernetes worker nodes)

# yum -y install ceph-common
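
The keyring and configuration copy mentioned above can be done with scp, assuming the Ceph admin node is reachable from the master as ceph_node1 (adjust the hostname to your environment):

[root@k8s-master ~]# mkdir -p /etc/ceph
[root@k8s-master ~]# scp ceph_node1:/etc/ceph/ceph.conf /etc/ceph/
[root@k8s-master ~]# scp ceph_node1:/etc/ceph/ceph.client.admin.keyring /etc/ceph/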

2. Create the OSD pool (on a Ceph mon or admin node)

[root@ceph_node1 ~]# ceph osd pool create kube 8
pool 'kube' created

[root@ceph_node1 ~]# ceph osd pool ls 
kube
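
On Ceph Luminous and later releases, a new pool should also be tagged with the application that will use it; if your Ceph version supports it, run:

[root@ceph_node1 ~]# ceph osd pool application enable kube rbd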

3. Create the Ceph user for Kubernetes (on a Ceph mon or admin node)

[root@ceph_node1 ~]# ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring

4. Retrieve the keys (on a Ceph mon or admin node)

[root@ceph_node1 ~]# ceph auth get-key client.admin
AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg==

[root@ceph_node1 ~]# ceph auth get-key client.kube
AQC5+vJehk7XIRAAr9mtGFHlUSfT7yQMANeWaw==

5. Create the admin secret (on the Kubernetes master node)

# Set CEPH_ADMIN_SECRET to the key retrieved for client.admin
[root@k8s-master ~]# export CEPH_ADMIN_SECRET='AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg=='

[root@k8s-master ~]# kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system

6. Create the secret that PVCs in the default namespace use to access Ceph (on the Kubernetes master node)

# Set CEPH_KUBE_SECRET to the key retrieved for client.kube
[root@k8s-master ~]# export CEPH_KUBE_SECRET='AQC5+vJehk7XIRAAr9mtGFHlUSfT7yQMANeWaw=='

[root@k8s-master ~]# kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_KUBE_SECRET \
--namespace=default

7. Check the secrets

[root@k8s-master ~]# kubectl get secret ceph-user-secret -o yaml
[root@k8s-master ~]# kubectl get secret ceph-secret -n kube-system -o yaml

8. Define the StorageClass

[root@k8s-master ~]# cat >storageclass-ceph-rdb.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-ceph-rdb
provisioner: ceph.com/rbd
parameters:
  monitors: 192.168.3.27:6789,192.168.3.60:6789,192.168.3.95:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-user-secret
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF

9. Create the StorageClass

[root@k8s-master ~]# kubectl apply -f storageclass-ceph-rdb.yaml

10. Verify

[root@k8s-master ~]# kubectl get sc
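
Optionally, the class can be marked as the cluster default so that PVCs without an explicit storageClassName also use it; this is the standard annotation patch:

[root@k8s-master ~]# kubectl patch storageclass dynamic-ceph-rdb \
  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'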

 

Testing

1. Create a test PVC

[root@k8s-master ~]# cat >ceph-rdb-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-rdb-claim
spec:
  accessModes:     
    - ReadWriteOnce
  storageClassName: dynamic-ceph-rdb
  resources:
    requests:
      storage: 2Gi
EOF

[root@k8s-master ~]# kubectl apply -f ceph-rdb-pvc-test.yaml 
persistentvolumeclaim/ceph-rdb-claim created

2. Check the PVC and PV

[root@k8s-master ~]# kubectl get pvc 
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS       AGE
ceph-rdb-claim   Bound    pvc-bd2363f1-a841-46d0-ad54-99267173bc04   2Gi        RWO            dynamic-ceph-rdb   16s

[root@k8s-master ~]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                    STORAGECLASS       REASON   AGE
pvc-bd2363f1-a841-46d0-ad54-99267173bc04   2Gi        RWO            Delete           Bound       default/ceph-rdb-claim   dynamic-ceph-rdb            29s
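
On the Ceph side you can confirm that an RBD image was actually created in the kube pool; the provisioner typically names it after the dynamic PVC (e.g. kubernetes-dynamic-pvc-<uuid>), so the exact name will differ in your cluster:

[root@ceph_node1 ~]# rbd ls -p kube
[root@ceph_node1 ~]# rbd info kube/$(rbd ls -p kube | head -1)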

3. Write an nginx Pod manifest for testing

[root@k8s-master ~]# cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
  - name: nginx-pod1
    image: nginx:alpine
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: ceph-rdb
      mountPath: /usr/share/nginx/html
  volumes:
  - name: ceph-rdb
    persistentVolumeClaim:
      claimName: ceph-rdb-claim
EOF

4. Create the Pod and check it

[root@k8s-master ~]# kubectl apply -f nginx-pod.yaml 
pod/nginx-pod1 created

[root@k8s-master ~]# kubectl get pods 
NAME         READY   STATUS    RESTARTS   AGE
nginx-pod1   1/1     Running   0          2m25s

[root@k8s-master ~]# kubectl get pods -o wide 
NAME         READY   STATUS    RESTARTS   AGE     IP           NODE        NOMINATED NODE   READINESS GATES
nginx-pod1   1/1     Running   0          2m34s   10.244.1.5   k8s-node1   <none>           <none>

5. Write test content into the volume

[root@k8s-master ~]# kubectl exec -it nginx-pod1 -- /bin/sh -c 'echo Hello World from Ceph RBD!!! > /usr/share/nginx/html/index.html'

6. Test access

[root@k8s-master ~]# POD_IP=$(kubectl get pods -o wide |grep nginx-pod1 |awk '{print $(NF-3)}')
[root@k8s-master ~]# curl $POD_IP
Hello World from Ceph RBD!!!
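
To confirm that the data lives on the RBD image rather than inside the container, you can delete and recreate the Pod and fetch the page again; the file written above should still be served (the Pod IP may change, so it is re-queried here):

[root@k8s-master ~]# kubectl delete pod nginx-pod1
[root@k8s-master ~]# kubectl apply -f nginx-pod.yaml
[root@k8s-master ~]# kubectl wait --for=condition=Ready pod/nginx-pod1 --timeout=120s
[root@k8s-master ~]# curl $(kubectl get pod nginx-pod1 -o jsonpath='{.status.podIP}')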

7. Clean up

[root@k8s-master ~]# kubectl delete -f nginx-pod.yaml 

[root@k8s-master ~]# kubectl delete -f ceph-rdb-pvc-test.yaml

 

Using CephFS as a persistent data volume

Create the CephFS pools on the Ceph side

1. Create two pools, one for data and one for metadata (on a Ceph mon or admin node; only 8 PGs each are used here because this is a test)

[root@ceph_node1 ~]# ceph osd pool create fs_data 8
pool 'fs_data' created
[root@ceph_node1 ~]# ceph osd pool create fs_metadata 8
pool 'fs_metadata' created

2. Create a CephFS filesystem (on a Ceph mon or admin node)

[root@ceph_node1 ~]# ceph fs new cephfs fs_metadata fs_data
new fs with metadata pool 8 and data pool 7

3. Verify

[root@ceph_node1 ~]# ceph fs ls 
name: cephfs, metadata pool: fs_metadata, data pools: [fs_data ]
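
CephFS also needs at least one active MDS daemon, otherwise the filesystem cannot be mounted or provisioned against; assuming an MDS has been deployed, its state can be checked with:

[root@ceph_node1 ~]# ceph mds stat
[root@ceph_node1 ~]# ceph fs status cephfs    # available on Luminous and later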

 

Configure the cephfs-provisioner

Kubernetes has no built-in dynamic provisioning support for CephFS, so the community-provided cephfs-provisioner is used.

1. Write the YAML manifest

[root@k8s-master ~]# cat >external-storage-cephfs-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
- kind: ServiceAccount
  name: cephfs-provisioner
  namespace: kube-system

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: cephfs-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
      - name: cephfs-provisioner
        image: "registry.cn-chengdu.aliyuncs.com/ives/cephfs-provisioner:latest"
        env:
        - name: PROVISIONER_NAME
          value: ceph.com/cephfs
        command:
        - "/usr/local/bin/cephfs-provisioner"
        args:
        - "-id=cephfs-provisioner-1"
      serviceAccount: cephfs-provisioner
EOF

2. Create the resources

[root@k8s-master ~]# kubectl apply -f external-storage-cephfs-provisioner.yaml

[root@k8s-master ~]# kubectl get pods -n kube-system |grep cephfs
cephfs-provisioner-6d76ff6bd5-zzlmt   1/1     Running   0          28s

 

Configure the StorageClass

1. Retrieve the admin key (on a Ceph mon or admin node)

[root@ceph_node1 ~]# ceph auth get-key client.admin
AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg==

2. Create the admin secret (on the Kubernetes master node). If the ceph-secret secret was already created in the RBD section above, this step can be skipped.

# Set CEPH_ADMIN_SECRET to the key retrieved for client.admin
[root@k8s-master ~]# export CEPH_ADMIN_SECRET='AQCzcPFeYnOoABAATaM1Wt8tMgvYTQjj6YEuVg=='

[root@k8s-master ~]# kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system

3. Check the secret

[root@k8s-master ~]# kubectl get secret ceph-secret -n kube-system -o yaml

4. Define the StorageClass

[root@k8s-master ~]# cat >storageclass-cephfs.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-cephfs
provisioner: ceph.com/cephfs
parameters:
    monitors: 192.168.3.27:6789,192.168.3.60:6789,192.168.3.95:6789
    adminId: admin
    adminSecretName: ceph-secret
    adminSecretNamespace: "kube-system"
    claimRoot: /volumes/kubernetes
EOF

5. Create the StorageClass

[root@k8s-master ~]# kubectl apply -f storageclass-cephfs.yaml 
storageclass.storage.k8s.io/dynamic-cephfs created

6. Verify

[root@k8s-master ~]# kubectl get sc 
NAME               PROVISIONER       RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
dynamic-cephfs     ceph.com/cephfs   Delete          Immediate           false                  17s

 

Testing

1. Create a test PVC

[root@k8s-master ~]# cat >cephfs-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-claim
spec:
  accessModes:     
    - ReadWriteMany
  storageClassName: dynamic-cephfs
  resources:
    requests:
      storage: 2Gi
EOF

[root@k8s-master ~]# kubectl apply -f cephfs-pvc-test.yaml 
persistentvolumeclaim/cephfs-claim created

2. Check the PV and PVC

[root@k8s-master ~]# kubectl get pvc 
NAME           STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS     AGE
cephfs-claim   Bound    pvc-b8194840-2664-418c-bad1-df1a4b028f30   2Gi        RWX            dynamic-cephfs   3s

[root@k8s-master ~]# kubectl get pv |grep pvc
pvc-b8194840-2664-418c-bad1-df1a4b028f30   2Gi        RWX            Delete           Bound       default/cephfs-claim   dynamic-cephfs            33s
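
If you want to see the backing directory on the Ceph side, the filesystem can be mounted with the admin key; the provisioner creates one sub-directory per claim under the claimRoot configured in the StorageClass. A sketch, assuming the kernel CephFS client is available on ceph_node1:

[root@ceph_node1 ~]# mkdir -p /mnt/cephfs
[root@ceph_node1 ~]# mount -t ceph 192.168.3.27:6789:/ /mnt/cephfs \
  -o name=admin,secret=$(ceph auth get-key client.admin)
[root@ceph_node1 ~]# ls /mnt/cephfs/volumes/kubernetes
[root@ceph_node1 ~]# umount /mnt/cephfs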

3. Write an nginx Pod manifest for testing

[root@k8s-master ~]# cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod2
  labels:
    name: nginx-pod2
spec:
  containers:
  - name: nginx-pod2
    image: nginx
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: cephfs
      mountPath: /usr/share/nginx/html
  volumes:
  - name: cephfs
    persistentVolumeClaim:
      claimName: cephfs-claim
EOF

4. Create the Pod and check it

[root@k8s-master ~]# kubectl apply -f nginx-pod.yaml 
pod/nginx-pod2 created

[root@k8s-master ~]# kubectl get pods 
NAME         READY   STATUS    RESTARTS   AGE
nginx-pod2   1/1     Running   0          16s

[root@k8s-master ~]# kubectl get pods -o wide
NAME         READY   STATUS    RESTARTS   AGE   IP           NODE        NOMINATED NODE   READINESS GATES
nginx-pod2   1/1     Running   0          88s   10.244.1.7   k8s-node1   <none>           <none>

5. Write test content into the volume

[root@k8s-master ~]# kubectl exec -it nginx-pod2 -- /bin/sh -c 'echo Hello World from CephFS!!! > /usr/share/nginx/html/index.html'

6. Test access

[root@k8s-master ~]# POD_IP=$(kubectl get pods -o wide |grep nginx-pod2 |awk '{print $(NF-3)}')

[root@k8s-master ~]# curl $POD_IP
Hello World from CephFS!!!
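
Because CephFS volumes are ReadWriteMany, the same claim can be mounted by a second Pod at the same time; a minimal sketch (the Pod name nginx-pod3 is made up here) that should serve the same index.html written above:

[root@k8s-master ~]# cat >nginx-pod3.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod3
spec:
  containers:
  - name: nginx-pod3
    image: nginx
    ports:
    - name: web
      containerPort: 80
    volumeMounts:
    - name: cephfs
      mountPath: /usr/share/nginx/html
  volumes:
  - name: cephfs
    persistentVolumeClaim:
      claimName: cephfs-claim      # same RWX claim used by nginx-pod2
EOF

[root@k8s-master ~]# kubectl apply -f nginx-pod3.yaml
[root@k8s-master ~]# kubectl wait --for=condition=Ready pod/nginx-pod3 --timeout=120s
[root@k8s-master ~]# curl $(kubectl get pod nginx-pod3 -o jsonpath='{.status.podIP}')
[root@k8s-master ~]# kubectl delete -f nginx-pod3.yaml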

7. Clean up

[root@k8s-master ~]# kubectl delete -f nginx-pod.yaml 

[root@k8s-master ~]# kubectl delete -f cephfs-pvc-test.yaml 

