Kubernetes Storage: Ceph (Cluster)


1. Building the Ceph Cluster (version: rpm-nautilus)

1.1 Server Planning

master (k8s cluster)     node1 (k8s cluster)      node2 (k8s cluster)
192.168.99.201           192.168.99.202           192.168.99.203
ceph-01 (ceph cluster)   ceph-02 (ceph cluster)   ceph-03 (ceph cluster)   ceph-client (ceph cluster)
192.168.99.204           192.168.99.205           192.168.99.206           192.168.99.207

All Ceph servers have an additional raw disk prepared (/dev/sdb).

Add the new disk
Here a new 50 GB disk, /dev/sdb, is added to every Ceph node as the OSD disk that provides the storage space. After adding it, rescan the SCSI bus and make sure the host recognizes it:

# Scan the SCSI bus and add SCSI devices
# for host in $(ls /sys/class/scsi_host) ; do echo "- - -" > /sys/class/scsi_host/$host/scan; done

# Rescan the SCSI bus
# for scsi_device in $(ls /sys/class/scsi_device/); do echo 1 > /sys/class/scsi_device/$scsi_device/device/rescan; done

# List block devices; seeing sdb means the new disk was recognized
lsblk

1.2 Environment Preparation

Add the yum repository (rpm-nautilus, ceph version 14.2.22) on all Ceph machines (servers + client):

$ cat > /etc/yum.repos.d/ceph.repo << EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=0
priority=1

[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=0
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=0
priority=1
EOF
Generate an SSH key on the deploy node ceph-01 and push it to all nodes:

[root@ceph-01 ~]# ssh-keygen

$ for i in 192.168.99.{201..207}; do echo ">>> $i";ssh-copy-id $i;done

$ for i in ceph-{01..03}; do echo ">>> $i";ssh-copy-id $i;done
[root@ceph-01 ~]# yum -y install ceph-deploy python-setuptools

[root@ceph-01 ~]# ceph-deploy --version
[root@ceph-01 ~]# mkdir /etc/ceph && cd /etc/ceph

[root@ceph-01 /etc/ceph]# ceph-deploy new ceph-01
[root@ceph-01 /etc/ceph]# ls
ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring
[root@ceph-01 /etc/ceph]# yum -y install ceph ceph-radosgw
[root@ceph-01 /etc/ceph]# ceph -v

[root@ceph-02 ~]# yum -y install ceph ceph-radosgw
[root@ceph-02 ~]# ceph -v

[root@ceph-03 ~]# yum -y install ceph ceph-radosgw
[root@ceph-03 ~]# ceph -v

The installs above can also be done with a single command:
$ ceph-deploy install ceph-01 ceph-02 ceph-03
[root@ceph-01 /etc/ceph]# echo public network = 192.168.99.0/24 >> /etc/ceph/ceph.conf

Initialize the monitor node and distribute the configuration to all nodes

[root@ceph-01 /etc/ceph]# ceph-deploy mon create-initial

[root@ceph-01 /etc/ceph]# ps -ef | grep ceph-mon

[root@ceph-01 /etc/ceph]# ceph health
HEALTH_WARN mon is allowing insecure global_id reclaim

[root@ceph-01 /etc/ceph]# ceph-deploy admin ceph-01 ceph-02 ceph-03
[root@ceph-01 /etc/ceph]# ceph -s

Disable the insecure mode (clears the warning: mon is allowing insecure global_id reclaim)

[root@ceph-01 /etc/ceph]# ceph config set mon auth_allow_insecure_global_id_reclaim false

[root@ceph-01 /etc/ceph]# ceph health
[root@ceph-01 /etc/ceph]# ceph -s

To avoid a mon single point of failure, add more mon nodes (an odd number is recommended because of quorum voting)

[root@ceph-01 /etc/ceph]# ceph-deploy mon add ceph-02
[root@ceph-01 /etc/ceph]# ceph-deploy mon add ceph-03
[root@ceph-01 /etc/ceph]# ceph -s

Check mon status in various ways

# Show mon status
[root@ceph-01 /etc/ceph]# ceph mon stat
# Show the mon election / quorum status
[root@ceph-01 /etc/ceph]# ceph quorum_status
# Show the mon map
[root@ceph-01 /etc/ceph]# ceph mon dump
# Show detailed status of a mon daemon
[root@ceph-01 /etc/ceph]# ceph daemon mon.ceph-01 mon_status

Create mgr (manager) daemons

# Create one mgr
[root@ceph-01 /etc/ceph]# ceph-deploy mgr create ceph-01
[root@ceph-01 /etc/ceph]# ceph -s

# Adding more mgrs provides HA
[root@ceph-01 /etc/ceph]# ceph-deploy mgr create ceph-02
[root@ceph-01 /etc/ceph]# ceph-deploy mgr create ceph-03
[root@ceph-01 /etc/ceph]# ceph -s

Create OSDs (storage disks)

[root@ceph-01 /etc/ceph]# ceph-deploy disk --help
[root@ceph-01 /etc/ceph]# ceph-deploy osd --help

[root@ceph-01 /etc/ceph]# ceph-deploy disk list ceph-01
[root@ceph-01 /etc/ceph]# ceph-deploy disk list ceph-02
[root@ceph-01 /etc/ceph]# ceph-deploy disk list ceph-03
[root@ceph-01 /etc/ceph]# ceph-deploy disk zap ceph-01 /dev/sdb
[root@ceph-01 /etc/ceph]# ceph-deploy disk zap ceph-02 /dev/sdb
[root@ceph-01 /etc/ceph]# ceph-deploy disk zap ceph-03 /dev/sdb

[root@ceph-01 /etc/ceph]# ceph-deploy osd create --data /dev/sdb ceph-01
[root@ceph-01 /etc/ceph]# ceph-deploy osd create --data /dev/sdb ceph-02
[root@ceph-01 /etc/ceph]# ceph-deploy osd create --data /dev/sdb ceph-03

[root@ceph-01 /etc/ceph]# ceph -s

Check OSD status in various ways

# Show OSD running status
[root@ceph-01 /etc/ceph]# ceph osd stat
# Show the OSD map
[root@ceph-01 /etc/ceph]# ceph osd dump
# Show per-OSD commit/apply latency
[root@ceph-01 /etc/ceph]# ceph osd perf
# Show detailed usage of every OSD disk in the cluster
[root@ceph-01 /etc/ceph]# ceph osd df
# Show the OSD tree
[root@ceph-01 /etc/ceph]# ceph osd tree
# Show the max_osd value
[root@ceph-01 /etc/ceph]# ceph osd getmaxosd

Troubleshooting a cluster that is unhealthy because of clock skew:

[root@ceph-01 /etc/ceph]# ceph -s
  cluster:
    id:     74cbea9d-a4a0-4efc-a267-38a595bb2174
    health: HEALTH_WARN
            clock skew detected on mon.ceph-02
1. ntpd
[root@ceph-02 ~]# systemctl restart ntpd
Then restart ceph-mon.target on the deploy node
[root@ceph-01 /etc/ceph]# systemctl restart ceph-mon.target

2. chronyd
[root@ceph-02 ~]# systemctl restart chronyd
Then restart ceph-mon.target on the deploy node
[root@ceph-01 /etc/ceph]# systemctl restart ceph-mon.target

3. Add the following two lines to the [global] section (raises the clock-drift thresholds)
cat >> /etc/ceph/ceph.conf << EOF
mon clock drift allowed = 2
mon clock drift warn backoff = 30
EOF
Then push the modified config to all nodes (the config was already distributed earlier, so this time it must be overwritten)
[root@ceph-01 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph-01 ceph-02 ceph-03
Back on ceph-01, restart the ceph-mon.target service
[root@ceph-01 /etc/ceph]# systemctl restart ceph-mon.target

Add the pool-deletion parameters to ceph.conf

# Add this line under the [global] section
[root@ceph-01 /etc/ceph]# echo mon_allow_pool_delete = true >> /etc/ceph/ceph.conf

[root@ceph-01 /etc/ceph]# echo mon_max_pg_per_osd = 2000 >> /etc/ceph/ceph.conf

# After adding the settings above, push ceph.conf to all mon nodes
[root@ceph-01 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph-01 ceph-02 ceph-03

# Restart the monitor service on all mon nodes
[root@ceph-01 /etc/ceph]# systemctl restart ceph-mon.target
[root@ceph-02 ~]# systemctl restart ceph-mon.target
[root@ceph-03 ~]# systemctl restart ceph-mon.target
[root@ceph-0n ~]# systemctl status ceph-mon.target

# To delete a pool, type the pool name twice and append --yes-i-really-really-mean-it
[root@ceph-01 /etc/ceph]# ceph osd pool delete test_pool test_pool --yes-i-really-really-mean-it
or
[root@ceph-01 /etc/ceph]# rados rmpool test_pool test_pool --yes-i-really-really-mean-it
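As a quick sanity check (a sketch; check_pool is just a throwaway example name), a pool can be created and immediately deleted to confirm the setting took effect:

[root@ceph-01 /etc/ceph]# ceph osd pool create check_pool 8
pool 'check_pool' created
[root@ceph-01 /etc/ceph]# ceph osd pool delete check_pool check_pool --yes-i-really-really-mean-it
pool 'check_pool' removed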

1.3 Ceph File Storage (CephFS)

1.3.1 Create the file storage

Step 1: On the deploy node ceph-01, sync the config file and create the MDS daemons

First sync the config (mon_allow_pool_delete = true was added to ceph-01's config earlier, so it must be pushed out before the next command will work)
[root@ceph-01 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph-01 ceph-02 ceph-03

# Create three MDS daemons here
[root@ceph-01 /etc/ceph]# ceph-deploy mds create ceph-01 ceph-02 ceph-03

[root@ceph-01 /etc/ceph]# ceph -s

Step 2: A Ceph filesystem needs at least two RADOS pools, one for data and one for metadata.

[root@ceph-01 /etc/ceph]# ceph osd pool create cephfs_pool 128
pool 'cephfs_pool' created
[root@ceph-01 /etc/ceph]# ceph osd pool create cephfs_metadata 64
pool 'cephfs_metadata' created
[root@ceph-01 /etc/ceph]# ceph osd pool ls |grep cephfs
cephfs_pool
cephfs_metadata

Step 3: Create the Ceph filesystem and confirm which node is active for clients

[root@ceph-01 /etc/ceph]# ceph fs new cephfs cephfs_metadata cephfs_pool
new fs with metadata pool 2 and data pool 1
[root@ceph-01 /etc/ceph]# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_pool ]
[root@ceph-01 /etc/ceph]# ceph mds stat
cephfs:1 {0=ceph-01=up:active} 2 up:standby   # ceph-01 is the active (up) MDS

Step 4: On ceph-01 (shown above as up/active), create the key file the client needs for mounting and copy it to the client. Ceph enables cephx authentication by default, so client mounts must authenticate with a user name and secret.

Generate the secret key file with the ceph-authtool utility
[root@ceph-01 /etc/ceph]# ceph-authtool -p /etc/ceph/ceph.client.admin.keyring > /etc/ceph/admin.key

Copy it to the client
[root@ceph-01 /etc/ceph]# scp admin.key ceph-client:/root

Step 5: Prepare the client node

[root@ceph-01 /etc/ceph]# ssh-copy-id -i ceph-client

[root@ceph-01 /etc/ceph]# ceph-deploy install ceph-client

[root@ceph-01 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph-01 ceph-02 ceph-03 ceph-client
# Verify that the command above completed successfully

Step 6: Install ceph-fuse on the client and mount using the key file generated on ceph-01 (the mount command below actually uses the kernel CephFS client)

[root@ceph-client ~]# yum install ceph-fuse -y

The command below performs the mount; 192.168.99.204 is ceph-01's IP and /root/admin.key is the secret key file
[root@ceph-client ~]# mount -t ceph 192.168.99.204:6789:/ /mnt -o name=admin,secretfile=/root/admin.key

[root@ceph-client ~]# df -h |tail -1
192.168.99.204:6789:/      47G     0   47G   0% /mnt
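As an alternative to the key file, the secret can also be passed inline with the secret= option (a sketch; the KEY value must be the output of "ceph auth get-key client.admin" obtained from ceph-01, shown here only as a placeholder):

[root@ceph-client ~]# KEY='AQC...'      # placeholder, paste the real admin key here
[root@ceph-client ~]# mount -t ceph 192.168.99.204:6789:/ /mnt -o name=admin,secret="$KEY"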

Step 7: Read/write test on the client

[root@ceph-client ~]# echo haha > /mnt/123.txt
[root@ceph-client ~]# cat /mnt/123.txt
haha

Exercise: add another client mount (ceph-03 can be used to simulate a client) and test whether both clients can read and write the same data at the same time.

[root@ceph-01 /etc/ceph]# scp /etc/ceph/admin.key ceph-03:/root

[root@ceph-03 ~]# mount -t ceph 192.168.99.204:6789:/ /mnt -o name=admin,secretfile=/root/admin.key

[root@ceph-03 ~]# df -h |tail -1
192.168.99.204:6789:/      47G     0   47G   0% /mnt

Read/write test from both clients: simultaneous reads and writes work.
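A sketch of the cross-client test (output is illustrative):

[root@ceph-client ~]# echo from-client > /mnt/shared.txt
[root@ceph-03 ~]# cat /mnt/shared.txt
from-client
[root@ceph-03 ~]# echo from-ceph-03 >> /mnt/shared.txt
[root@ceph-client ~]# cat /mnt/shared.txt
from-client
from-ceph-03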

1.3.2 Delete the file storage

Step 1: On the clients, delete the data and umount all mounts

[root@ceph-client ~]# rm /mnt/* -rf
[root@ceph-client ~]# umount /mnt/
[root@ceph-03 ~]# umount /mnt/

Step 2: Stop the MDS on all nodes (the file storage can only be removed after the MDS is stopped)

[root@ceph-01 /etc/ceph]# systemctl stop ceph-mds.target
[root@ceph-02 ~]# systemctl stop ceph-mds.target
[root@ceph-03 ~]# systemctl stop ceph-mds.target

Step 3: Back on the storage side, remove the filesystem and its pools

[root@ceph-01 /etc/ceph]# ceph fs rm cephfs --yes-i-really-mean-it

[root@ceph-01 /etc/ceph]# ceph osd pool delete cephfs_metadata cephfs_metadata --yes-i-really-really-mean-it
pool 'cephfs_metadata' removed

[root@ceph-01 /etc/ceph]# ceph osd pool delete cephfs_pool cephfs_pool --yes-i-really-really-mean-it
pool 'cephfs_pool' removed

Step 4: The MDS service can be started again (optional; needed if file storage will be used again later)

[root@ceph-01 /etc/ceph]# systemctl start ceph-mds.target
[root@ceph-02 ~]# systemctl start ceph-mds.target
[root@ceph-03 ~]# systemctl start ceph-mds.target
[root@ceph-0n ~]# systemctl status ceph-mds.target

1.4 Ceph Block Storage (RBD)

1.4.1 Create block storage

Step 1: From the deploy node ceph-01, sync the config to all nodes (including the client)

[root@ceph-01 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph-01 ceph-02 ceph-03 ceph-client

Step 2: Create a pool and initialize it for RBD

[root@ceph-client ~]# ceph osd pool create rbd_pool 128
pool 'rbd_pool' created

[root@ceph-client ~]# rbd pool init rbd_pool

Step 3: Create a volume (here named volume1, 500 MB)

[root@ceph-client ~]# rbd create volume1 --pool rbd_pool --size 500
[root@ceph-client ~]# rbd ls rbd_pool
volume1
[root@ceph-client ~]# rbd info volume1 -p rbd_pool

Step 4: Map the volume to a block device

# The mapping fails because the OS kernel does not support some RBD image features
[root@ceph-client /etc/ceph]# rbd map rbd_pool/volume1

# Fix:
# disable the unsupported features
[root@ceph-client ~]# rbd feature disable rbd_pool/volume1 object-map fast-diff deep-flatten

# Map again
[root@ceph-client ~]# rbd map rbd_pool/volume1
/dev/rbd0

# Show mappings (to unmap, use rbd unmap /dev/rbd0)
[root@ceph-client ~]# rbd showmapped
id pool     namespace image   snap device
0  rbd_pool           volume1 -    /dev/rbd0

Step 5: Use the block device

[root@ceph-client ~]# lsblk
[root@ceph-client ~]# mkfs.xfs /dev/rbd0
[root@ceph-client ~]# mount /dev/rbd0 /mnt/
[root@ceph-client ~]# df -h |tail -1
/dev/rbd0                498M   26M  473M   6% /mnt

[root@ceph-client ~]# echo yyds > /mnt/456.txt
[root@ceph-client ~]# cat /mnt/456.txt
yyds
1.4.2 Grow and shrink block storage
  • Online expansion
# Grow from 500M to 800M
[root@ceph-client ~]# rbd resize --size 800 rbd_pool/volume1
Resizing image: 100% complete...done.

[root@ceph-client ~]# rbd info rbd_pool/volume1 |grep size
        size 800 MiB in 200 objects

# Check the size: no change yet
[root@ceph-client ~]# df -h |tail -1
/dev/rbd0                498M   26M  473M   6% /mnt

[root@ceph-client ~]# xfs_growfs -d /mnt/

# Check again: the online expansion succeeded
[root@ceph-client ~]# df -h |tail -1
/dev/rbd0                798M   26M  772M   4% /mnt
  • Shrinking block storage

Shrinking cannot be done online. After the shrink the device must be re-formatted and re-mounted, so back up the data first.
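For example, a simple backup before the shrink and a restore afterwards (a sketch; /root/mnt-backup.tar.gz is an arbitrary path):

[root@ceph-client ~]# tar czf /root/mnt-backup.tar.gz -C /mnt .
# ... run the shrink, re-format and re-mount steps below, then restore with:
# tar xzf /root/mnt-backup.tar.gz -C /mnt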

# Shrink back to 500M
[root@ceph-client ~]# rbd resize --size 500 rbd_pool/volume1 --allow-shrink
Resizing image: 100% complete...done.

# Re-format and re-mount
[root@ceph-client ~]# umount /mnt/
[root@ceph-client ~]# mkfs.xfs -f /dev/rbd0
[root@ceph-client ~]# mount /dev/rbd0 /mnt/

# Check again to confirm the shrink succeeded
[root@ceph-client ~]# df -h |tail -1
/dev/rbd0                498M   26M  473M   6% /mnt
1.4.3 Delete block storage
[root@ceph-client ~]# umount /mnt/
[root@ceph-client ~]# rbd unmap /dev/rbd0
[root@ceph-client ~]# ceph osd pool delete rbd_pool rbd_pool --yes-i-really-really-mean-it
pool 'rbd_pool' removed

1.5 Ceph Object Storage

1.5.1 Test the connection to the Ceph object gateway

Step 1: Create the RGW (object storage gateway) on ceph-01

[root@ceph-01 /etc/ceph]# yum install -y ceph-radosgw   # already installed earlier
[root@ceph-01 /etc/ceph]# ceph-deploy rgw create ceph-01

[root@ceph-01 /etc/ceph]# lsof -i :7480
COMMAND  PID USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
radosgw 5925 ceph   46u  IPv4  28670      0t0  TCP *:7480 (LISTEN)
radosgw 5925 ceph   47u  IPv6  28672      0t0  TCP *:7480 (LISTEN)

http://192.168.99.204:7480


Step 2: Test the connection to the object gateway from the client

# Create a test user
[root@ceph-client ~]# radosgw-admin user list
[]

[root@ceph-client ~]# radosgw-admin user create --uid="testuser" --display-name="First User"

{
    "user_id": "testuser",
    "display_name": "First User",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [],
    "keys": [
        {
            "user": "testuser",
            "access_key": "4O1OMS47IK0196FE4LP1",
            "secret_key": "TQgfHVNigeYcqjzA6tKTShczFHzMVIAaAe9bzqAa"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}

[root@ceph-client ~]# radosgw-admin user list
[
    "testuser"
]

# Note down the access_key and secret_key values from the output
# If they were not recorded, they can also be retrieved with:
$ radosgw-admin user info --uid=testuser
  • Install the python test tool
[root@ceph-client ~]# yum install python-boto -y
  • Write a python test program
$ cat > s3_test.py <<EOF
import boto
import boto.s3.connection

access_key = ''
secret_key = ''

conn = boto.connect_s3(
        aws_access_key_id = access_key,
        aws_secret_access_key = secret_key,
        host = 'ceph-01', port = 7480,
        is_secure=False, calling_format = boto.s3.connection.OrdinaryCallingFormat(),
        )

bucket = conn.create_bucket('my-new-bucket')
for bucket in conn.get_all_buckets():
        print "{name}".format(
                name = bucket.name,
                created = bucket.creation_date,
)
EOF

[root@ceph-client ~]# vim s3_test.py
import boto
import boto.s3.connection

access_key = '4O1OMS47IK0196FE4LP1'   # these two keys come from the user created above
secret_key = 'TQgfHVNigeYcqjzA6tKTShczFHzMVIAaAe9bzqAa'

conn = boto.connect_s3(
        aws_access_key_id = access_key,
        aws_secret_access_key = secret_key,
        host = 'ceph-01', port = 7480,   # ceph-client must be able to resolve ceph-01, or use ceph-01's IP instead
        is_secure=False, calling_format = boto.s3.connection.OrdinaryCallingFormat(),
        )

bucket = conn.create_bucket('my-new-bucket')
for bucket in conn.get_all_buckets():
        print "{name}".format(
                name = bucket.name,
                created = bucket.creation_date,
)

# The test succeeds
[root@ceph-client ~]# python s3_test.py
my-new-bucket
1.5.2 Accessing the Ceph object gateway with S3 tools

Amazon S3 is an Internet-facing object storage service. The s3cmd tool can be used here to operate on Ceph's object storage.

Step 1: Install the s3cmd tool on the client and write the Ceph connection configuration file

[root@ceph-client ~]# yum install s3cmd -y

# Create the file below; the keys correspond to the test user created earlier
[root@ceph-client ~]# vim /root/.s3cfg
[default]
access_key = 4O1OMS47IK0196FE4LP1
secret_key = TQgfHVNigeYcqjzA6tKTShczFHzMVIAaAe9bzqAa
host_base = 192.168.99.204:7480
host_bucket = 192.168.99.204:7480/%(bucket)
cloudfront_host = 192.168.99.204:7480
use_https = False

Step 2: Test with s3cmd commands

# List buckets; the my-new-bucket created in the earlier test is visible
[root@ceph-client ~]# s3cmd ls
2021-07-11 19:41  s3://my-new-bucket

# Create another bucket
[root@ceph-client ~]# s3cmd mb s3://test_bucket
Bucket 's3://test_bucket/' created

# Upload a file to the bucket
[root@ceph-client ~]# s3cmd put /etc/fstab s3://test_bucket
upload: '/etc/fstab' -> 's3://test_bucket/fstab'  [1 of 1]
 541 of 541   100% in    1s   350.03 B/s  done
 
# Download it to the current directory
[root@ceph-client ~]# s3cmd get s3://test_bucket/fstab
download: 's3://test_bucket/fstab' -> './fstab'  [1 of 1]
 541 of 541   100% in    0s    11.03 KB/s  done
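Two more common operations, shown as a sketch: delete the uploaded object and then remove the empty bucket.

# Delete the object
[root@ceph-client ~]# s3cmd del s3://test_bucket/fstab

# Remove the bucket
[root@ceph-client ~]# s3cmd rb s3://test_bucket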

# See the command help for more operations
[root@ceph-client ~]# s3cmd --help

1.6 Ceph Dashboard

Ceph ships a native dashboard that gives a visual, web-based view of the Ceph storage cluster

**(nautilus) requires installing ceph-mgr-dashboard**

1. Install it on every mgr node

$ yum install ceph-mgr-dashboard -y
$ ceph mgr versions
$ ps -ef | grep ceph-mgr
$ ceph -s

2. View the mgr module help and module list

$ ceph mgr module --help
$ ceph mgr module ls | head -20

3. Enable the dashboard mgr module

$ ceph mgr module enable dashboard
$ ceph mgr module ls | head -20

4. Create a self-signed certificate

By default all HTTP connections to the dashboard are secured with SSL/TLS.

To get the dashboard up and running quickly, generate and install a self-signed certificate with the built-in command:

[root@ceph-01 /etc/ceph]# ceph dashboard create-self-signed-cert
Self-signed certificate created

5. Create an access-control role and a user with the administrator role

Set the user name and password

[root@ceph-01 /etc/ceph]# echo admin123 > /root/ceph-password.txt

# Create user admin with the administrator role; the password is read from ceph-password.txt
[root@ceph-01 /etc/ceph]# ceph dashboard ac-user-create admin -i /root/ceph-password.txt administrator
{"username": "admin", "lastUpdate": 1626079263, "name": null, "roles": ["administrator"], "password": "$2b$12$zlN6AOugMKWqn4l680QEje8.Ny12XT7WHoBN4oeEceLLndjR.xlRi", "email": null}

# Show users
[root@ceph-01 /etc/ceph]# ceph dashboard ac-user-show
["admin"]

# Show roles
[root@ceph-01 /etc/ceph]# ceph dashboard ac-role-show
["administrator", "pool-manager", "cephfs-manager", "cluster-manager", "block-manager", "read-only", "rgw-manager", "ganesha-manager"]

# Command to delete a user
[root@ceph-01 /etc/ceph]# ceph dashboard ac-user-delete admin
User 'admin' deleted
[root@ceph-01 /etc/ceph]# ceph dashboard ac-user-show
[]

# See the command help for more operations
[root@ceph-01 /etc/ceph]# ceph dashboard -h

6. Configure the mgr services on the active mgr node

This mainly sets the IP and port the dashboard listens on

# The active mgr node here is ceph-01
[root@ceph-01 /etc/ceph]# ceph -s |grep mgr
    mgr: ceph-01(active, since 44m), standbys: ceph-02, ceph-03

# Show the default ceph-mgr services
[root@ceph-01 /etc/ceph]# ceph mgr services
{
    "dashboard": "https://ceph-01:8443/"
}

The dashboard web page can now be reached at the default service URL

https://ceph-01:8443/ or https://192.168.99.204:8443/

Log in with the account and password created above (admin / admin123)

When the nodes are VMs, accessing by host name requires name resolution!

On Windows, add "192.168.99.204 ceph-01" to C:\Windows\System32\drivers\etc\hosts
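On a Linux client the equivalent entry can be added like this (a sketch):

$ echo "192.168.99.204 ceph-01" >> /etc/hosts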

  • Customize the listen IP and port
# They can be changed like this:
[root@ceph-01 /etc/ceph]# ceph config set mgr mgr/dashboard/server_addr 192.168.99.204

[root@ceph-01 /etc/ceph]# ceph config set mgr mgr/dashboard/server_port 8080

# The services still show the old URL right after the change; the dashboard must be restarted
[root@ceph-01 /etc/ceph]# ceph mgr services
{
"dashboard": "https://ceph-01:8443/"
}

# Apply the configuration by disabling and re-enabling the module
[root@ceph-01 /etc/ceph]# ceph mgr module disable dashboard
[root@ceph-01 /etc/ceph]# ceph mgr module enable dashboard
[root@ceph-01 /etc/ceph]# ceph mgr services
{
"dashboard": "https://192.168.99.204:8080/"
}

Disable SSL

Only do this to serve the dashboard over plain http; skip this step if https is wanted

[root@ceph-01 /etc/ceph]# ceph config ls |grep mgr/dashboard/ssl
mgr/dashboard/ssl

[root@ceph-01 /etc/ceph]# ceph config set mgr mgr/dashboard/ssl false

Enable RGW in the dashboard (Object Gateway management)

After installation the Ceph dashboard does not manage RGW by default; it has to be enabled manually

Deploy RGW

# Install on all nodes for high availability
$ yum install ceph-radosgw -y   # already installed earlier
$ ceph -s

[root@ceph-01 /etc/ceph]# ceph-deploy rgw create ceph-01 ceph-02 ceph-03

Create an RGW system account

[root@ceph-01 /etc/ceph]# radosgw-admin user list
[
    "testuser"
]

[root@ceph-01 /etc/ceph]# radosgw-admin user create --uid=rgw --display-name=rgw  --system 

{
    "user_id": "rgw",
    "display_name": "rgw",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [],
    "keys": [
        {
            "user": "rgw",
            "access_key": "QNJI1APRKX691UJ2R9B3",
            "secret_key": "u5g1JtnCotNjE1H9MMerLc7QefW8xK8PLiw7ZGUs"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "system": "true",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}

[root@ceph-01 /etc/ceph]# radosgw-admin user list
[
    "rgw",
    "testuser"
]

# Note down the access_key and secret_key values from the output
# If they were not recorded, they can also be retrieved with:
$ radosgw-admin user info --uid=rgw

Set the access_key and secret_key

# Write out the access_key value
[root@ceph-01 /etc/ceph]# echo QNJI1APRKX691UJ2R9B3 > access_key

# Write out the secret_key value
[root@ceph-01 /etc/ceph]# echo u5g1JtnCotNjE1H9MMerLc7QefW8xK8PLiw7ZGUs > secret_key

# Hand the RGW credentials to the dashboard
[root@ceph-01 /etc/ceph]# ceph dashboard set-rgw-api-access-key -i access_key
Option RGW_API_ACCESS_KEY updated

[root@ceph-01 /etc/ceph]# ceph dashboard set-rgw-api-secret-key -i secret_key
Option RGW_API_SECRET_KEY updated

Disable SSL verification for the RGW API

Only needed when the RGW is accessed over plain http; skip this step if https is used

# ceph dashboard set-rgw-api-ssl-verify False

1. Now refresh the dashboard and the RGW information is visible

1.7 Monitoring Ceph with Prometheus + Grafana

Prometheus + Grafana are installed here on ceph-01

1.7.1 Install Grafana
# 1. Configure the yum repo file
[root@ceph-01 /etc/ceph]# cat > /etc/yum.repos.d/grafana.repo << EOF
[grafana]
name=grafana
baseurl=https://packages.grafana.com/oss/rpm
repo_gpgcheck=1
enabled=1
gpgcheck=1
gpgkey=https://packages.grafana.com/gpg.key
sslverify=1
sslcacert=/etc/pki/tls/certs/ca-bundle.crt
EOF

# 2. Install grafana with yum
[root@ceph-01 /etc/ceph]# yum install grafana -y 

# 3. Start grafana and enable it at boot
[root@ceph-01 /etc/ceph]# systemctl enable grafana-server --now

[root@ceph-01 /etc/ceph]# grafana-server -v
Version 8.0.5 (commit: cbb2aa5001, branch: HEAD)
[root@ceph-01 /etc/ceph]# grafana-cli -v
Grafana CLI version 8.0.5
1.7.2 Install Prometheus
# 1. Download the tarball; download page: https://prometheus.io/download/

[root@ceph-01 ~]# wget https://github.com/prometheus/prometheus/releases/download/v2.28.1/prometheus-2.28.1.linux-amd64.tar.gz

# 2. Unpack the tarball
[root@ceph-01 ~]# tar xf prometheus-2.28.1.linux-amd64.tar.gz

# 3. Rename the unpacked directory
[root@ceph-01 ~]# mv prometheus-2.28.1.linux-amd64 /usr/local/prometheus
[root@ceph-01 ~]# cd /usr/local/prometheus

# 4. Check the prometheus version
[root@ceph-01 /usr/local/prometheus]# ./prometheus --version

# 5. Create a systemd unit so prometheus runs as a service
[root@ceph-01 /usr/local/prometheus]# cat > /etc/systemd/system/prometheus.service << EOF
[Unit]
Description=Prometheus Monitoring System
Documentation=Prometheus Monitoring System

[Service]
ExecStart=/usr/local/prometheus/prometheus \
  --config.file /usr/local/prometheus/prometheus.yml \
  --web.listen-address=:9090

[Install]
WantedBy=multi-user.target
EOF

# 6. Reload systemd
[root@ceph-01 /usr/local/prometheus]# systemctl daemon-reload

# 7. Start the service and enable it at boot
[root@ceph-01 /usr/local/prometheus]# systemctl enable prometheus --now
[root@ceph-01 /usr/local/prometheus]# systemctl status prometheus
1.7.3 Configure the ceph-mgr prometheus module
[root@ceph-01 /usr/local/prometheus]# ceph mgr module enable prometheus
[root@ceph-01 /usr/local/prometheus]# ceph mgr module ls | head -20

# The active mgr here is ceph-01
[root@ceph-01 /usr/local/prometheus]# ceph -s |grep mgr
    mgr: ceph-01(active, since 73s), standbys: ceph-02, ceph-03
    
# Check the listening port
[root@ceph-01 /usr/local/prometheus]# netstat -nltp | grep mgr
# Check the metrics endpoint
[root@ceph-01 /usr/local/prometheus]# curl 127.0.0.1:9283/metrics
1.7.4 Configure Prometheus

1. Add the following under the scrape_configs: section

[root@ceph-01 /usr/local/prometheus]# cat >> /usr/local/prometheus/prometheus.yml << EOF
  - job_name: 'ceph_cluster'
    static_configs:
    - targets: ['192.168.99.204:9283']
EOF

Note: 192.168.99.204:9283 is the address of the node running the active mgr
[root@ceph-01 /usr/local/prometheus]# ceph -s |grep mgr
    mgr: ceph-01(active, since 62m), standbys: ceph-02

2. Restart the prometheus service

[root@ceph-01 /usr/local/prometheus]# systemctl restart prometheus
[root@ceph-01 /usr/local/prometheus]# systemctl status prometheus

3. Check that the target was added in Prometheus

# Browser -> http://192.168.99.204:9090 -> Status -> Targets
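The same check can also be done from the command line against the Prometheus HTTP API (a sketch; every target should report "up"):

[root@ceph-01 ~]# curl -s http://192.168.99.204:9090/api/v1/targets | grep -o '"health":"[^"]*"'
"health":"up"
"health":"up"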

4. Configure Grafana

URL: http://192.168.99.204:3000

The default login is admin / admin; Grafana forces a password change on first login (changed here to admin123)

https://grafana.com/grafana/dashboards?search=ceph

Dashboard IDs used here: 917 and 2842

1. Log in to the Grafana web UI in a browser

2. Add Prometheus as a data source: Configuration -> Data sources

3. Add dashboards: open https://grafana.com/grafana/dashboards?search=ceph, pick a suitable dashboard, note its ID, and import it via Import

1. Adding Prometheus as a data source (screenshots omitted)


2. Adding the Ceph dashboards (screenshots omitted)


2. Using an External Ceph Cluster from Kubernetes

k8s can consume Ceph as a volume in two ways:

  • cephfs
  • rbd

1) A Ceph cluster supports only one CephFS

2) CephFS supports all three k8s PV access modes: ReadWriteOnce, ReadOnlyMany and ReadWriteMany

3) RBD supports the ReadWriteOnce and ReadOnlyMany modes

Note: access modes are only capability descriptions; they are not enforced. The storage provider is responsible for runtime errors when a PV is used in a way the PVC did not declare. For example, if a PVC's access mode is ReadOnlyMany, a pod that mounts it can still write; to make it truly read-only, specify readOnly: true when the claim is used.
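For illustration, a minimal sketch of enforcing read-only access on the pod side (all names here are hypothetical):

$ cat > readonly-pod-demo.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: readonly-demo
spec:
  containers:
  - name: app
    image: busybox:1.24
    command: ["sleep", "3600"]
    volumeMounts:
    - name: data
      mountPath: /data
      readOnly: true                    # mount read-only inside the container
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: some-existing-pvc      # hypothetical PVC name
      readOnly: true                    # request the claim read-only
EOF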

2.1 Static PV (RBD) method

2.1.1 Install the dependencies (all k8s nodes)

Note: install the ceph-common package from the same repository and at the same version as the Ceph cluster.

$ cat > /etc/yum.repos.d/ceph.repo << EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=0
priority=1

[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=0
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=0
priority=1
EOF
$ yum install ceph-common -y
2.1.2 Sync the Ceph config files
[root@ceph-01 ~]# ssh-copy-id k8s-master
[root@ceph-01 ~]# ssh-copy-id k8s-node01
[root@ceph-01 ~]# ssh-copy-id k8s-node02

[root@ceph-01 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph-01 ceph-02 ceph-03 ceph-client k8s-master k8s-node01 k8s-node02
2.1.3 Create a pool and enable RBD
$ ceph osd pool create kube 128 128
# Create the kube pool for k8s to use
2.1.4 Create a Ceph user for k8s
$ ceph auth list
# List the auth users in the cluster and their keys

$ ceph auth del osd.0    # (shown only as an example of the delete command, do NOT run it!)
# Deletes an auth user from the cluster

$ ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=kube'
# Create the ceph user that k8s will use
2.1.5 Create the Secret resources
$ ceph auth get-key client.admin | base64
$ ceph auth get-key client.kube | base64

Base64-encode the keys; a Secret's data fields must hold base64-encoded values (base64 is an encoding, not encryption), so the keys are not pasted into the manifests as plain text

$ mkdir jtpv && cd jtpv

$ cat > ceph-admin-secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
  namespace: default
data:
  key:  # (paste the base64-encoded key of client.admin here)
type: kubernetes.io/rbd
EOF

$ cat > ceph-kube-secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: ceph-kube-secret
  namespace: default
data:
  key:  # (paste the base64-encoded key of client.kube here)
type: kubernetes.io/rbd
EOF
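The empty key: fields in the two files above have to be filled with the base64 output. A sketch of doing that automatically (assumes it runs on a node that has the admin keyring, e.g. the k8s master after the config sync above; the sed expressions simply overwrite the key: lines):

$ ADMIN_KEY=$(ceph auth get-key client.admin | base64)
$ KUBE_KEY=$(ceph auth get-key client.kube | base64)
$ sed -i "s|^  key:.*|  key: ${ADMIN_KEY}|" ceph-admin-secret.yaml
$ sed -i "s|^  key:.*|  key: ${KUBE_KEY}|"  ceph-kube-secret.yaml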

$ cat > pv.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-pv-test
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  rbd:
    monitors:
      - 192.168.99.204:6789
      - 192.168.99.205:6789
      - 192.168.99.206:6789
    pool: kube
    image: ceph-image
    user: admin
    secretRef:
      name: ceph-admin-secret
    fsType: ext4
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
EOF

$ rbd create -p kube -s 5G ceph-image
# Create the image (in plain words: carve out a chunk of space for the PV to use)

$ rbd ls -p kube
ceph-image
$ rbd info ceph-image -p kube
$ rbd feature disable kube/ceph-image object-map fast-diff deep-flatten
# Disable the unsupported image features

$ cat > pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-test-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
EOF

$ cat > pod.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: ceph-pod
spec:
  containers:
  - name: test-pod
    image: busybox:1.24
    command: ["sleep", "60000"]
    volumeMounts:
    - name: pvc
      mountPath: /usr/share/busybox
      readOnly: false
  volumes:
    - name: pvc
      persistentVolumeClaim:
        claimName: ceph-test-claim
EOF

Verification:

$ kubectl apply -f ceph-admin-secret.yaml
$ kubectl apply -f ceph-kube-secret.yaml
$ kubectl apply -f pv.yaml
$ kubectl apply -f pvc.yaml
$ kubectl apply -f pod.yaml

$ kubectl exec -it ceph-pod  -- df -h |grep /dev/rbd0
/dev/rbd0                 4.8G     20.0M      4.8G   0% /usr/share/busybox
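A quick read/write check inside the pod (a sketch; output illustrative):

$ kubectl exec -it ceph-pod -- sh -c 'echo hello > /usr/share/busybox/test.txt'
$ kubectl exec -it ceph-pod -- cat /usr/share/busybox/test.txt
hello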

2.2 Dynamic PV (CephFS) method

2.2.1 Ceph-side steps

1. On the deploy node ceph-01, sync the config file and create at least one MDS service

Using CephFS requires at least one node providing the MDS service

[root@ceph-01 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph-01 ceph-02 ceph-03

# Create three MDS daemons here
[root@ceph-01 /etc/ceph]# ceph-deploy mds create ceph-01 ceph-02 ceph-03

[root@ceph-01 /etc/ceph]# ceph -s

2. Create the pools and the filesystem

1. Create the CephFS pools: fs_metadata and fs_data

2. Create the CephFS filesystem, named cephfs

A Ceph filesystem needs at least two RADOS pools, one for data and one for metadata.

$ ceph osd pool create fs_data 128 128
$ ceph osd pool create fs_metadata 128 128
$ ceph fs new cephfs fs_metadata fs_data
$ ceph fs ls

Get the cluster information and the admin user's key

$ ceph mon dump
$ ceph auth get client.admin

Note: no base64 encoding is needed here

2.2.2 k8s-side steps

1. Install the dependencies on all k8s nodes

$ cat > /etc/yum.repos.d/ceph.repo << EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=0
priority=1

[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=0
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=0
priority=1
EOF
$ yum install ceph-common -y

2. The Ceph cluster's /etc/ceph/{ceph.conf,ceph.client.admin.keyring} must be synced to all k8s nodes

[root@ceph-01 ~]# ssh-copy-id k8s-master
[root@ceph-01 ~]# ssh-copy-id k8s-node01
[root@ceph-01 ~]# ssh-copy-id k8s-node02

[root@ceph-01 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph-01 ceph-02 ceph-03 ceph-client k8s-master k8s-node01 k8s-node02
2.2.3 Deploy the CephFS CSI driver

Official upstream repository: ceph-csi

$ mkdir -p /root/my-ceph-csi/deploy/cephfs && cd /root/my-ceph-csi/deploy/cephfs
# Create the working directory

Edit csi-config-map.yaml with the connection information for the Ceph cluster

$ cat > csi-config-map.yaml << EOF
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "96f54e84-dbfc-4650-8896-8f3b5f524bbf", # ceph集群的ID,此內容可以使用ceph mon dump來查看,clusterID對應fsid
        "monitors": [
          "192.168.99.204:6789,192.168.99.205:6789,192.168.99.206:6789"
        ]
      }
    ] 
metadata:
  name: ceph-csi-config
EOF

The comment above must be removed (JSON does not allow comments), otherwise it will cause errors!!!
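The clusterID is simply the cluster fsid; besides ceph mon dump it can be read directly (a sketch, showing this document's cluster value):

[root@ceph-01 /etc/ceph]# ceph fsid
96f54e84-dbfc-4650-8896-8f3b5f524bbf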

To deploy into some other namespace xxx, change the namespace in csi-provisioner-rbac.yaml and csi-nodeplugin-rbac.yaml to xxx; here the default namespace used in the yaml files is kept.

Deploy the CephFS-related CSI components

$ cat > csi-provisioner-rbac.yaml << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-csi-provisioner

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: cephfs-csi-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: cephfs-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: cephfs-external-provisioner-cfg
rules:
  # remove this once we stop supporting v1.0.0
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-provisioner-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: cephfs-csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: cephfs-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io
EOF

$ cat > csi-nodeplugin-rbac.yaml << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-csi-nodeplugin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-nodeplugin
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-nodeplugin
subjects:
  - kind: ServiceAccount
    name: cephfs-csi-nodeplugin
    namespace: default
roleRef:
  kind: ClusterRole
  name: cephfs-csi-nodeplugin
  apiGroup: rbac.authorization.k8s.io
EOF

$ vim csi-cephfsplugin-provisioner.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: csi-cephfsplugin-provisioner
  labels:
    app: csi-metrics
spec:
  selector:
    app: csi-cephfsplugin-provisioner
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8681

---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-cephfsplugin-provisioner
spec:
  selector:
    matchLabels:
      app: csi-cephfsplugin-provisioner
  replicas: 3
  template:
    metadata:
      labels:
        app: csi-cephfsplugin-provisioner
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-cephfsplugin-provisioner
              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: cephfs-csi-provisioner
      priorityClassName: system-cluster-critical
      containers:
        - name: csi-provisioner
          image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election=true"
            - "--retry-interval-start=500ms"
            - "--feature-gates=Topology=false"
            - "--extra-create-metadata=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: k8s.gcr.io/sig-storage/csi-resizer:v1.2.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election"
            - "--retry-interval-start=500ms"
            - "--handle-volume-inuse-error=false"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-snapshotter
          image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"
            - "--timeout=150s"
            - "--leader-election=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          securityContext:
            privileged: true
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-cephfsplugin-attacher
          image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election=true"
            - "--retry-interval-start=500ms"
          env:
            - name: ADDRESS
              value: /csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-cephfsplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
          # for stable functionality replace canary with latest release version
          image: quay.io/cephcsi/cephcsi:canary
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=cephfs"
            - "--controllerserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=cephfs.csi.ceph.com"
            - "--pidlimit=-1"
            - "--enableprofiling=false"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: host-sys
              mountPath: /sys
            - name: lib-modules
              mountPath: /lib/modules
              readOnly: true
            - name: host-dev
              mountPath: /dev
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
        - name: liveness-prometheus
          image: quay.io/cephcsi/cephcsi:canary
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8681"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: socket-dir
          emptyDir: {
            medium: "Memory"
          }
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: host-dev
          hostPath:
            path: /dev
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }


$ vim csi-cephfsplugin.yaml
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-cephfsplugin
spec:
  selector:
    matchLabels:
      app: csi-cephfsplugin
  template:
    metadata:
      labels:
        app: csi-cephfsplugin
    spec:
      serviceAccountName: cephfs-csi-nodeplugin
      priorityClassName: system-node-critical
      hostNetwork: true
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
      # resolved through k8s service, set dns policy to cluster first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          # This is necessary only for systems with SELinux, where
          # non-privileged sidecar containers cannot access unix domain socket
          # created by privileged CSI driver container.
          securityContext:
            privileged: true
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-cephfsplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          # for stable functionality replace canary with latest release version
          image: quay.io/cephcsi/cephcsi:canary
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=cephfs"
            - "--nodeserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=cephfs.csi.ceph.com"
            - "--enableprofiling=false"
            # If topology based provisioning is desired, configure required
            # node labels representing the nodes topology domain
            # and pass the label names below, for CSI to consume and advertise
            # its equivalent topology domain
            # - "--domainlabels=failure-domain/region,failure-domain/zone"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: mountpoint-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins
              mountPropagation: "Bidirectional"
            - name: host-sys
              mountPath: /sys
            - name: lib-modules
              mountPath: /lib/modules
              readOnly: true
            - name: host-dev
              mountPath: /dev
            - name: host-mount
              mountPath: /run/mount
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
        - name: liveness-prometheus
          securityContext:
            privileged: true
          image: quay.io/cephcsi/cephcsi:canary
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8681"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: mountpoint-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-mount
          hostPath:
            path: /run/mount
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }
---
# This is a service to expose the liveness metrics
apiVersion: v1
kind: Service
metadata:
  name: csi-metrics-cephfsplugin
  labels:
    app: csi-metrics
spec:
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8681
  selector:
    app: csi-cephfsplugin

First load the images offline (on all k8s nodes)

Offline image download (Baidu netdisk): https://pan.baidu.com/s/1xp2cJTDD-KR2hYqTD3L9QQ
Extraction code: m66b

$ grep image csi-cephfsplugin-provisioner.yaml
# List the required images

# Upload the image archives to the servers, then load them:
$ unzip '*.zip'
$ ls *tar |xargs -i docker load -i {} && docker images

Because this k8s cluster has only three nodes, the master must be allowed to run pods

# A master with this taint will not run pods
[root@master ~]# kubectl describe node k8s-master |grep Taints
Taints: node-role.kubernetes.io/master:NoSchedule
# A master showing this will run pods
[root@master ~]# kubectl describe node k8s-master |grep Taints
Taints: <none>
# Allow the master node to schedule pods:
[root@master ~]# kubectl taint nodes k8s-master node-role.kubernetes.io/master-
# Restore the master node to not scheduling pods:
[root@master ~]# kubectl taint nodes k8s-master node-role.kubernetes.io/master=:NoSchedule

Deploy

$ kubectl apply -f csi-config-map.yaml
$ kubectl create -f csi-provisioner-rbac.yaml
$ kubectl create -f csi-nodeplugin-rbac.yaml
$ kubectl create -f csi-cephfsplugin-provisioner.yaml
$ kubectl create -f csi-cephfsplugin.yaml
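Before continuing, check that the CSI pods come up (a sketch):

$ kubectl get pods -o wide | grep csi-cephfs
$ kubectl get csidrivers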

Verification

1. On k8s, create the secret for connecting to the Ceph cluster (secret.yaml)

$ cat > secret.yaml << EOF
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: default
stringData:
  # Obtained with ceph auth get client.admin; no base64 encoding needed here
  # Required for statically provisioned volumes
  userID: admin
  userKey: AQCoAPBg9LQTMhAALBgNqW3DDcaAm9NL6HFzaA==

  # Required for dynamically provisioned volumes
  adminID: admin
  adminKey: AQCoAPBg9LQTMhAALBgNqW3DDcaAm9NL6HFzaA==
EOF

$ kubectl apply -f secret.yaml

$ kubectl get secret csi-cephfs-secret -n default

2. Create the StorageClass (storageclass.yaml)

Field 1 (clusterID): change it to your own cluster's ID, from ceph mon dump

Field 2 (fsName): the filesystem named cephfs created above, see ceph fs ls

Field 3 (pool): uncomment it and set the data pool (not the metadata pool), see ceph osd pool ls

$ cat > storageclass.yaml <<EOF
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs-sc
provisioner: cephfs.csi.ceph.com
parameters:
  clusterID: 96f54e84-dbfc-4650-8896-8f3b5f524bbf # change this to your own cluster ID
  fsName: cephfs # change this to your filesystem name
  pool: fs_data # change this to your data pool
  csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - debug
EOF
  
$ kubectl apply -f storageclass.yaml

$ kubectl get sc csi-cephfs-sc -n default

3. Create a PVC based on the StorageClass

$ cat > pvc.yaml << EOF
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-cephfs-sc
EOF

$ kubectl apply -f pvc.yaml

$ kubectl get pvc

4. Create a pod that uses the PVC

$ cat > pod.yaml << EOF
---
apiVersion: v1
kind: Pod
metadata:
  name: csi-cephfs-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx:alpine
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: csi-cephfs-pvc
        readOnly: false
EOF

$ kubectl apply -f pod.yaml

$ kubectl get pod csi-cephfs-demo-pod -n default

Check where the data is mounted

$ kubectl exec -it csi-cephfs-demo-pod -- df -Th | grep ceph
192.168.99.204:6789,192.168.99.205:6789,192.168.99.206:6789:/volumes/csi/csi-vol-ba73d3f4-2f3a-11ec-8925-865ed536e16d/79a41bdb-9bfb-4d70-a98c-487a7f8ba04d
                     ceph            1.0G         0      1.0G   0% /var/lib/www
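A final read/write check through the pod (a sketch; output illustrative):

$ kubectl exec -it csi-cephfs-demo-pod -- sh -c 'echo hello > /var/lib/www/index.html'
$ kubectl exec -it csi-cephfs-demo-pod -- cat /var/lib/www/index.html
hello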

