Introduction
This article describes how to use Ceph to provide dynamically provisioned PVs for Kubernetes. Ceph supplies the underlying storage: CephFS supports all three Kubernetes PV access modes (ReadWriteOnce, ReadOnlyMany, ReadWriteMany), while RBD supports two (ReadWriteOnce, ReadOnlyMany).
Access modes only describe a capability; they are not enforced. If a PV is used in a way the PVC did not declare, the storage provider is responsible for raising runtime errors at access time. For example, a volume whose PVC declares ReadOnlyMany is still writable after a pod mounts it; to make it truly read-only, specify the readOnly: true parameter where the claim is consumed.
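As a minimal illustration (the volume and claim names below are hypothetical), readOnly: true goes on the volume reference in the pod spec:

# Hypothetical pod spec fragment: mount the claim read-only
volumes:
  - name: data
    persistentVolumeClaim:
      claimName: some-claim
      readOnly: true   # enforced at mount time, unlike the access mode on the PVC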
Deployment
Deploy Kubernetes
Deploy Ceph
Configure Ceph in the Kubernetes cluster
Using Ceph RBD
Extra configuration for clusters installed with kubeadm
# The following extra steps are needed for clusters deployed with kubeadm:
# with dynamic provisioning, controller-manager must run the rbd command to create images,
# but the official controller-manager image does not ship the rbd binary,
# so without the workaround below, PVC creation fails.
# Related issue: https://github.com/kubernetes/kubernetes/issues/38923
cat >external-storage-rbd-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["kube-dns"]
    verbs: ["list", "get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: rbd-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: rbd-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: rbd-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rbd-provisioner
subjects:
  - kind: ServiceAccount
    name: rbd-provisioner
    namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: rbd-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: rbd-provisioner
    spec:
      containers:
        - name: rbd-provisioner
          image: "quay.io/external_storage/rbd-provisioner:v2.0.0-k8s1.11"
          env:
            - name: PROVISIONER_NAME
              value: ceph.com/rbd
      serviceAccount: rbd-provisioner
EOF
kubectl apply -f external-storage-rbd-provisioner.yaml
# Check the status; wait until the pod is Running before continuing
kubectl get pod -n kube-system
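To watch just the provisioner pod instead of the whole namespace, the app=rbd-provisioner label from the Deployment above can be used:

# Watch only the rbd-provisioner pod until it is Running
kubectl -n kube-system get pod -l app=rbd-provisioner -w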
Configure the StorageClass
# Install ceph-common on every node of the k8s cluster;
# kubelet needs the rbd command to map and attach the images created for RBD volumes
yum install -y ceph-common
# Create the osd pool (on a ceph mon or admin node)
ceph osd pool create kube 4096
ceph osd pool ls
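The placement-group count (4096 above) should be sized to the cluster's OSD count; as a quick sanity check:

# Confirm the pool's pg_num (a common rule of thumb is roughly 100 PGs per OSD across all pools)
ceph osd pool get kube pg_num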
# Create a user for k8s to access ceph (on a ceph mon or admin node)
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=kube' -o ceph.client.kube.keyring
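Optionally, confirm the capabilities that were just granted to the new user:

# Show the client.kube entity, including its key and caps
ceph auth get client.kube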
# View the keys (on a ceph mon or admin node)
ceph auth get-key client.admin
ceph auth get-key client.kube
# Create the admin secret
# Replace CEPH_ADMIN_SECRET with the key obtained from client.admin
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system
# Create the secret that PVCs in the default namespace use to access ceph
# Replace CEPH_KUBE_SECRET with the key obtained from client.kube
export CEPH_KUBE_SECRET='AQBZK3VbTN/QOBAAIYi6CRLQcVevW5HM8lunOg=='
kubectl create secret generic ceph-user-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_KUBE_SECRET \
--namespace=default
# Check the secrets
kubectl get secret ceph-user-secret -o yaml
kubectl get secret ceph-secret -n kube-system -o yaml
# Configure the StorageClass
# If the cluster was created with kubeadm, use the following provisioner
# provisioner: ceph.com/rbd
cat >storageclass-ceph-rdb.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-ceph-rdb
provisioner: ceph.com/rbd
# provisioner: kubernetes.io/rbd
parameters:
  monitors: 11.11.11.111:6789,11.11.11.112:6789,11.11.11.113:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-user-secret
  fsType: ext4
  imageFormat: "2"
  imageFeatures: "layering"
EOF
# Create
kubectl apply -f storageclass-ceph-rdb.yaml
# Check
kubectl get sc
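Optionally, if no other default StorageClass exists, this class can be marked as the cluster default so PVCs may omit storageClassName:

# Mark dynamic-ceph-rdb as the default StorageClass (optional)
kubectl patch storageclass dynamic-ceph-rdb \
  -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'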
Test it out
# Create a test PVC
cat >ceph-rdb-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: ceph-rdb-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: dynamic-ceph-rdb
  resources:
    requests:
      storage: 2Gi
EOF
kubectl apply -f ceph-rdb-pvc-test.yaml
# Check
kubectl get pvc
kubectl get pv
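To confirm the PV is actually backed by an RBD image, check the kube pool on a ceph mon or admin node; the ceph.com/rbd provisioner typically names images with a kubernetes-dynamic-pvc- prefix (treat the exact prefix as an assumption of this sketch, and IMAGE is a placeholder):

# List images in the kube pool; one should correspond to the new PV
rbd ls -p kube
# Inspect the image's size and features (replace IMAGE with a name from the list)
rbd info kube/IMAGE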
# Create an nginx pod to test the mount
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod1
  labels:
    name: nginx-pod1
spec:
  containers:
    - name: nginx-pod1
      image: nginx:alpine
      ports:
        - name: web
          containerPort: 80
      volumeMounts:
        - name: ceph-rdb
          mountPath: /usr/share/nginx/html
  volumes:
    - name: ceph-rdb
      persistentVolumeClaim:
        claimName: ceph-rdb-claim
EOF
kubectl apply -f nginx-pod.yaml
# Check
kubectl get pods -o wide
# Write a test page into the mounted volume
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from Ceph RBD!!! > /usr/share/nginx/html/index.html'
# Access test: the second-to-last column of the wide output is the pod IP
POD_IP=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_IP
# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f ceph-rdb-pvc-test.yaml
Using CephFS
The Linux kernel must be 4.10+, otherwise CephFS will not work properly; for details see the issue at github.com/kubernetes-… On CentOS 7 this means upgrading the kernel.
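A minimal sketch of the CentOS 7 kernel upgrade, using the ELRepo repository and its mainline kernel-ml package (the repo URL and package choice are assumptions; verify them against your environment):

uname -r    # check the running kernel first; skip the rest if it is already 4.10+
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-ml
grub2-set-default 0   # the newly installed kernel is the first grub entry
reboot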
Create a CephFS filesystem on the Ceph cluster
# Run the following on a ceph mon or admin node
# CephFS needs two pools, one for data and one for metadata
ceph osd pool create fs_data 128
ceph osd pool create fs_metadata 128
ceph osd lspools
# Create a CephFS filesystem
ceph fs new cephfs fs_metadata fs_data
# Check
ceph fs ls
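CephFS also needs at least one active MDS daemon before clients can use it; a quick check on the same node:

# Confirm an MDS is up and active
ceph mds stat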
Deploy cephfs-provisioner
# There is no official in-tree dynamic volume support for CephFS,
# so we use the community-provided cephfs-provisioner
cat >external-storage-cephfs-provisioner.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-provisioner
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cephfs-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cephfs-provisioner
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create", "get", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cephfs-provisioner
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cephfs-provisioner
subjects:
  - kind: ServiceAccount
    name: cephfs-provisioner
    namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: cephfs-provisioner
  namespace: kube-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: cephfs-provisioner
    spec:
      containers:
        - name: cephfs-provisioner
          image: "quay.io/external_storage/cephfs-provisioner:v2.0.0-k8s1.11"
          env:
            - name: PROVISIONER_NAME
              value: ceph.com/cephfs
          command:
            - "/usr/local/bin/cephfs-provisioner"
          args:
            - "-id=cephfs-provisioner-1"
      serviceAccount: cephfs-provisioner
EOF
kubectl apply -f external-storage-cephfs-provisioner.yaml
# Check the status; wait until the pod is Running before continuing
kubectl get pod -n kube-system
Configure the StorageClass
# View the key (on a ceph mon or admin node)
ceph auth get-key client.admin
# Create the admin secret
# Replace CEPH_ADMIN_SECRET with the key obtained from client.admin
# If it was already created while testing the ceph rbd setup, skip this step
export CEPH_ADMIN_SECRET='AQBBAnRbSiSOFxAAEZXNMzYV6hsceccYLhzdWw=='
kubectl create secret generic ceph-secret --type="kubernetes.io/rbd" \
--from-literal=key=$CEPH_ADMIN_SECRET \
--namespace=kube-system
# Check the secret
kubectl get secret ceph-secret -n kube-system -o yaml
# Configure the StorageClass
cat >storageclass-cephfs.yaml<<EOF
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: dynamic-cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: 11.11.11.111:6789,11.11.11.112:6789,11.11.11.113:6789
  adminId: admin
  adminSecretName: ceph-secret
  adminSecretNamespace: "kube-system"
  claimRoot: /volumes/kubernetes
EOF
# Create
kubectl apply -f storageclass-cephfs.yaml
# Check
kubectl get sc
Test it out
# Create a test PVC
cat >cephfs-pvc-test.yaml<<EOF
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: cephfs-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: dynamic-cephfs
  resources:
    requests:
      storage: 2Gi
EOF
kubectl apply -f cephfs-pvc-test.yaml
# 查看
kubectl get pvc
kubectl get pv
# 創建 nginx pod 掛載測試
cat >nginx-pod.yaml<<EOF
apiVersion: v1
kind: Pod
metadata:
name: nginx-pod1
labels:
name: nginx-pod1
spec:
containers:
- name: nginx-pod1
image: nginx:alpine
ports:
- name: web
containerPort: 80
volumeMounts:
- name: cephfs
mountPath: /usr/share/nginx/html
volumes:
- name: cephfs
persistentVolumeClaim:
claimName: cephfs-claim
EOF
kubectl apply -f nginx-pod.yaml
# Check
kubectl get pods -o wide
# Write a test page into the mounted volume
kubectl exec -ti nginx-pod1 -- /bin/sh -c 'echo Hello World from CephFS!!! > /usr/share/nginx/html/index.html'
# Access test: the second-to-last column of the wide output is the pod IP
POD_IP=$(kubectl get pods -o wide | grep nginx-pod1 | awk '{print $(NF-1)}')
curl http://$POD_IP
# Clean up
kubectl delete -f nginx-pod.yaml
kubectl delete -f cephfs-pvc-test.yaml