IV. Using Ceph Storage


I. RBD

1. Introduction to RBD

Ceph can provide object storage (RADOSGW), block storage (RBD), and file system storage (CephFS) at the same time. RBD is short for RADOS Block Device and is one of the most commonly used storage types. An RBD block device can be mapped and mounted like a disk; it supports snapshots, multiple replicas, cloning, and consistency, and its data is striped across multiple OSDs in the Ceph cluster.

Striping is a technique that automatically balances I/O load across multiple physical disks: a contiguous piece of data is split into many small chunks that are stored on different disks. This lets multiple processes access different parts of the data at the same time without disk contention, and sequential access gains the maximum possible I/O parallelism, which yields very good performance.
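As a hedged illustration of striping at the RBD level (the image name stripe-demo and the stripe values are examples only, not part of the original setup, and the command assumes the myrbd1 pool created in the next section), an image can be created with explicit stripe settings so that consecutive chunks land in different RADOS objects:

# create an image that stripes data in 1M units across 4 objects at a time
rbd create stripe-demo --size 5G --pool myrbd1 --image-format 2 --image-feature layering --stripe-unit 1M --stripe-count 4
# confirm the settings; stripe unit and stripe count are reported in the image info
rbd --pool myrbd1 --image stripe-demo info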

  

2. Configuring RBD on the Ceph side

1) Create a pool for RBD

ceph osd pool create <pool_name> pg_num pgp_num
pgp_num controls how the PGs' data is combined and placed on the OSDs; pgp_num is usually set equal to pg_num

ceph osd pool create myrbd1 64 64 
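If pg_num is raised later, pgp_num is normally raised to the same value so that the placement groups are actually redistributed; a minimal sketch (the value 128 is illustrative):

ceph osd pool set myrbd1 pg_num 128
ceph osd pool set myrbd1 pgp_num 128
# check the current value
ceph osd pool get myrbd1 pg_num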

  

2) Enable the rbd application on the new pool

ceph osd pool application enable myrbd1 rbd 

  

3) Initialize the pool with the rbd command

rbd pool init -p myrbd1

4) Create an image

An RBD pool cannot be used as a block device directly; images must first be created in it as needed, and those images are then used as the block devices. The rbd command can create, list, and delete block-device images, and also clone images, create snapshots, roll an image back to a snapshot, list snapshots, and so on. For example, the following command creates an image named mying1.
Create an RBD image named mying1 with a size of 5G in the myrbd1 pool
rbd create mying1 --size 5G --pool myrbd1

The later steps use mying2. Because the CentOS client kernel is relatively old and cannot map an image with all features enabled, only some features are turned on; features other than layering require a newer kernel.
rbd create myimg2 --size 3G --pool myrbd1 --image-format 2 --image-feature layering

5) List the images in the pool

cephadmin@ceph-deploy:~/ceph-cluster$ rbd ls --pool myrbd1
mying1
mying2

6) Show information about a specific image

cephadmin@ceph-deploy:~/ceph-cluster$ rbd --image mying1 --pool myrbd1 info
rbd image 'mying1':
    size 5 GiB in 1280 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 144f88aecd24
    block_name_prefix: rbd_data.144f88aecd24
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten  #all features enabled by default
    op_features: 
    flags: 
    create_timestamp: Fri Aug 20 22:08:32 2021
    access_timestamp: Fri Aug 20 22:08:32 2021
    modify_timestamp: Fri Aug 20 22:08:32 2021

cephadmin@ceph-deploy:~/ceph-cluster$ rbd --image mying2 --pool myrbd1 info
rbd image 'mying2':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1458dabfc2f1
    block_name_prefix: rbd_data.1458dabfc2f1
    format: 2
    features: layering  #only the layering feature was enabled explicitly
    op_features:  
    flags: 
    create_timestamp: Fri Aug 20 22:11:30 2021
    access_timestamp: Fri Aug 20 22:11:30 2021
    modify_timestamp: Fri Aug 20 22:11:30 2021

3. Using RBD from a client

1) Current usage of the Ceph cluster

cephadmin@ceph-deploy:~/ceph-cluster$ ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL     USED  RAW USED  %RAW USED
hdd    300 GiB  300 GiB  168 MiB   168 MiB       0.05
TOTAL  300 GiB  300 GiB  168 MiB   168 MiB       0.05

--- POOLS ---
POOL                   ID  PGS  STORED  OBJECTS    USED  %USED  MAX AVAIL
device_health_metrics   1    1     0 B        0     0 B      0     95 GiB
myrbd1                  2   64   405 B        7  48 KiB      0     95 GiB

2) Configure the Ceph repository on the client

wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main" >> /etc/apt/source.list
apt update

3) Install ceph-common

 
apt install -y ceph-common

4) Copy the admin keyring from the deploy server to the client's /etc/ceph directory

 
cephadmin@ceph-deploy:~/ceph-cluster$ sudo scp ceph.conf ceph.client.admin.keyring root@172.168.32.111:/etc/ceph/

5) Map the image on the client

 
root@client:~# rbd --pool myrbd1 map mying2
/dev/rbd0

root@client:~# fdisk -l | grep rbd0
Disk /dev/rbd0: 3 GiB, 3221225472 bytes, 6291456 sectors

6) Format rbd0 on the client and mount it at /mnt

root@client:~# mkfs.xfs /dev/rbd0
root@client:~# mount /dev/rbd0 /mnt
root@client:~# df -TH|grep rbd0
/dev/rbd0      xfs       3.3G   38M  3.2G   2% /mnt

7) Create a 300 MB file

root@client:~# dd if=/dev/zero of=/mnt/rbd_test bs=1M count=300
root@client:~# ll -h /mnt/rbd_test 
-rw-r--r-- 1 root root 300M Aug 20 22:40 /mnt/rbd_test

8) Verify the data in RBD from the ceph-deploy node

cephadmin@ceph-deploy:~/ceph-cluster$ ceph df
--- RAW STORAGE ---
CLASS     SIZE    AVAIL     USED  RAW USED  %RAW USED
hdd    300 GiB  298 GiB  2.0 GiB   2.0 GiB       0.67
TOTAL  300 GiB  298 GiB  2.0 GiB   2.0 GiB       0.67

--- POOLS ---
POOL                   ID  PGS   STORED  OBJECTS     USED  %USED  MAX AVAIL
device_health_metrics   1    1      0 B        0      0 B      0     94 GiB
myrbd1                  2   64  310 MiB       95  931 MiB   0.32     94 GiB #about 300 MB of data is now stored in the myrbd1 pool
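To relate this back to the striping behaviour described earlier, the RADOS objects backing the image can be listed directly from the pool; a sketch (the exact object names will vary, but they start with the block_name_prefix shown in rbd info):

rados ls --pool myrbd1 | grep rbd_data | head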

4. Mounting RBD as a regular (non-admin) user

1) Create a regular account and its permissions on ceph-deploy

 
cephadmin@ceph-deploy:/etc/ceph$ ceph auth get-or-create client.kaka mon 'allow r' osd 'allow rwx pool=myrbd1'
[client.kaka]
    key = AQC/ISNhUdzSDBAAgD7q6bPsEN0ymTh6B7rw8g==
cephadmin@ceph-deploy:/etc/ceph$ ceph auth get client.kaka
[client.kaka]
    key = AQC/ISNhUdzSDBAAgD7q6bPsEN0ymTh6B7rw8g==
    caps mon = "allow r"
    caps osd = "allow rwx pool=myrbd1"
exported keyring for client.kaka

2) Create the keyring file for the regular user client.kaka on ceph-deploy

 
#create an empty keyring file
cephadmin@ceph-deploy:/etc/ceph$ sudo ceph-authtool --create-keyring ceph.client.kaka.keyring
creating ceph.client.kaka.keyring
#export the client.kaka credentials into the keyring file
cephadmin@ceph-deploy:/etc/ceph$ sudo ceph auth get client.kaka -o ceph.client.kaka.keyring 
exported keyring for client.kaka

cephadmin@ceph-deploy:/etc/ceph$ sudo ceph-authtool -l ceph.client.kaka.keyring 
[client.kaka]
    key = AQC/ISNhUdzSDBAAgD7q6bPsEN0ymTh6B7rw8g==
    caps mon = "allow r"
    caps osd = "allow rwx pool=myrbd1"

3) Install ceph-common on the client

#1. Add the Ceph repository
root@client:~#wget -q -O- 'https://mirrors.tuna.tsinghua.edu.cn/ceph/keys/release.asc' | sudo apt-key add -
root@client:~#echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific bionic main" >> /etc/apt/sources.list
root@client:~#apt update
#2. Install ceph-common
root@client:~#apt install ceph-common

4) Copy the regular user's credentials from ceph-deploy to the client

#copy ceph.conf and ceph.client.kaka.keyring to the client
cephadmin@ceph-deploy:/etc/ceph$ sudo scp ceph.conf ceph.client.kaka.keyring 172.168.32.111:/etc/ceph/
#verify on the client
root@client:/etc/ceph# ls
ceph.client.admin.keyring  ceph.client.kaka.keyring  ceph.client.ywx.keyring  ceph.conf  rbdmap

#verify the permissions
root@client:/etc/ceph# ceph --user kaka -s
  cluster:
    id:     f0e7c394-989b-4803-86c3-5557ae25e814
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-mon01,ceph-mon02,ceph-mon03 (age 10h)
    mgr: ceph-mgr01(active, since 10h), standbys: ceph-mgr02
    osd: 16 osds: 11 up (since 10h), 11 in (since 10h)
 
  data:
    pools:   2 pools, 65 pgs
    objects: 94 objects, 310 MiB
    usage:   2.0 GiB used, 218 GiB / 220 GiB avail
    pgs:     65 active+clean

5) Map the RBD image as the regular user

Use the mying1 image created earlier.

root@client:/etc/ceph# rbd --user kaka --pool myrbd1 map mying1
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable myrbd1/mying1 object-map fast-diff deep-flatten".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
rbd: --user is deprecated, use --id
#The map failed because the client kernel is too old to support the object-map, fast-diff and deep-flatten features.
#Ubuntu 20.04 supports these features.
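As the error message itself suggests, an alternative to creating a new image is to disable the unsupported features on the existing image and map it again; a sketch:

#disable the features the old client kernel cannot handle, then retry the map
rbd feature disable myrbd1/mying1 object-map fast-diff deep-flatten
rbd --id kaka --pool myrbd1 map mying1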

Create an image mying3 on ceph-deploy without the object-map, fast-diff and deep-flatten features.

#create image mying3 with image-format 2 and only the layering feature enabled
cephadmin@ceph-deploy:/etc/ceph$ rbd create mying3 --size 3G --pool myrbd1 --image-format 2 --image-feature layering
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1
mying1
mying2
mying3

Map mying3 on the client.

root@client:/etc/ceph# rbd --user kaka --pool myrbd1 map mying3
/dev/rbd1
#verify: rbd1 was mapped successfully
root@client:/etc/ceph# fdisk -l /dev/rbd1
Disk /dev/rbd1: 3 GiB, 3221225472 bytes, 6291456 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
#it can now be formatted and used

6) Format and use the RBD device

root@client:/etc/ceph# fdisk -l
Disk /dev/sda: 50 GiB, 53687091200 bytes, 104857600 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x870c4380

Device     Boot Start       End   Sectors Size Id Type
/dev/sda1  *     2048 104855551 104853504  50G 83 Linux


Disk /dev/rbd1: 3 GiB, 3221225472 bytes, 6291456 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes

#format the rbd1 disk
root@client:/etc/ceph# mkfs.ext4 /dev/rbd1
mke2fs 1.44.1 (24-Mar-2018)
Discarding device blocks: done                            
Creating filesystem with 786432 4k blocks and 196608 inodes
Filesystem UUID: dae3f414-ceae-4535-97d4-c369820f3116
Superblock backups stored on blocks: 
    32768, 98304, 163840, 229376, 294912

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (16384 blocks): done
Writing superblocks and filesystem accounting information:      
done

#mount /dev/rbd1 at /mnt
root@client:/etc/ceph# mount /dev/rbd1 /mnt
root@client:/etc/ceph# mount |grep /dev/rbd1
/dev/rbd1 on /mnt type ext4 (rw,relatime,stripe=1024,data=ordered)

#create a 200 MB file in /mnt
root@client:/etc/ceph# cd /mnt
root@client:/mnt# 
root@client:/mnt# 
root@client:/mnt# dd if=/dev/zero of=/mnt/rbd-test bs=1M count=200
200+0 records in
200+0 records out
209715200 bytes (210 MB, 200 MiB) copied, 0.205969 s, 1.0 GB/s

root@client:/mnt# ll -h
total 201M
drwxr-xr-x  3 root root 4.0K Aug 23 13:06 ./
drwxr-xr-x 22 root root  326 Aug 17 09:56 ../
drwx------  2 root root  16K Aug 23 13:04 lost+found/
-rw-r--r--  1 root root 200M Aug 23 13:06 rbd-test

7) After an RBD device is mapped, the system loads the Ceph kernel modules automatically

 
#after mapping an RBD device, the kernel automatically loads the libceph.ko module
root@client:/mnt# lsmod |grep ceph
libceph               315392  1 rbd
libcrc32c              16384  3 xfs,raid456,libceph

8) Growing an RBD image

The image can be grown; shrinking it is not recommended.

#current size of mying3
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 -l
NAME    SIZE   PARENT  FMT  PROT  LOCK
mying1  5 GiB            2            
mying2  3 GiB            2            
mying3  3 GiB            2   
#grow the mying3 image to 8G
cephadmin@ceph-deploy:/etc/ceph$ rbd resize --pool myrbd1 --image mying3 --size 8G
Resizing image: 100% complete...done.
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 -l
NAME    SIZE   PARENT  FMT  PROT  LOCK
mying1  5 GiB            2            
mying2  3 GiB            2            
mying3  8 GiB            2  

#on the client, /dev/rbd1 is already 8G but the filesystem is still 3G
root@client:/mnt# df -Th /mnt
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/rbd1      ext4  2.9G  209M  2.6G   8% /mnt

root@client:/mnt# df -Th /dev/rbd1
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/rbd1      ext4  2.9G  209M  2.6G   8% /mnt

#grow the filesystem
#1. unmount
root@client:~# umount /mnt
#2. grow the filesystem on /dev/rbd1
root@client:~# resize2fs /dev/rbd1
#3. remount
root@client:~# mount /dev/rbd1 /mnt

root@client:~# df -Th /dev/rbd1
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/rbd1      ext4  7.9G  214M  7.3G   3% /mnt
root@client:~# df -Th /mnt
Filesystem     Type  Size  Used Avail Use% Mounted on
/dev/rbd1      ext4  7.9G  214M  7.3G   3% /mnt
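Note that resize2fs only applies to ext2/3/4 filesystems. For an xfs filesystem, such as the one created on /dev/rbd0 earlier, the filesystem is instead grown online while mounted; a sketch (the sizes are illustrative):

#grow the image, then grow the mounted xfs filesystem in place
rbd resize --pool myrbd1 --image mying2 --size 6G
xfs_growfs /mnt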

9) Mount at boot

root@client:~# cat /etc/rc.local
rbd --user kaka -p myrbd1 map mying3
mount /dev/rbd1 /mnt
root@client:~# chmod a+x /etc/rc.local
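As an alternative to rc.local, the ceph-common package provides an rbdmap service that maps the images listed in /etc/ceph/rbdmap at boot; a sketch, assuming the kaka keyring path used above:

#/etc/ceph/rbdmap: one image per line with the id and keyring to use
echo "myrbd1/mying3 id=kaka,keyring=/etc/ceph/ceph.client.kaka.keyring" >> /etc/ceph/rbdmap
#enable the service; the mapped device then appears as /dev/rbd/myrbd1/mying3 and can be
#mounted from /etc/fstab with the _netdev option
systemctl enable --now rbdmap.service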

5. RBD image information and features

1) Show detailed image information

cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying1 info
rbd image 'mying1':
    size 5 GiB in 1280 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 144f88aecd24
    block_name_prefix: rbd_data.144f88aecd24
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten  #image features
    op_features: 
    flags: 
    create_timestamp: Fri Aug 20 22:08:32 2021
    access_timestamp: Fri Aug 20 22:08:32 2021
    modify_timestamp: Fri Aug 20 22:08:32 2021

cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying2 info
rbd image 'mying2':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1458dabfc2f1
    block_name_prefix: rbd_data.1458dabfc2f1
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Fri Aug 20 22:11:30 2021
    access_timestamp: Fri Aug 20 22:11:30 2021
    modify_timestamp: Fri Aug 20 22:11:30 2021

2) Show image information in JSON format

cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 -l --format json --pretty-format
[
    {
        "image": "mying1",
        "id": "144f88aecd24",
        "size": 5368709120,
        "format": 2
    },
    {
        "image": "mying2",
        "id": "1458dabfc2f1",
        "size": 3221225472,
        "format": 2
    },
    {
        "image": "mying3",
        "id": "1893f853e249",
        "size": 3221225472,
        "format": 2
    }
]

3) Image features

cephadmin@ceph-deploy:/etc/ceph$ rbd help feature enable 
usage: rbd feature enable [--pool <pool>] [--namespace <namespace>] 
                          [--image <image>] 
                          [--journal-splay-width <journal-splay-width>] 
                          [--journal-object-size <journal-object-size>] 
                          [--journal-pool <journal-pool>] 
                          <image-spec> <features> [<features> ...] 

Enable the specified image feature.

Positional arguments
  <image-spec>              image specification
                            (example: [<pool-name>/[<namespace>/]]<image-name>)
  <features>                image features
                            [exclusive-lock, object-map, journaling]

Optional arguments
  -p [ --pool ] arg         pool name
  --namespace arg           namespace name
  --image arg               image name
  --journal-splay-width arg number of active journal objects
  --journal-object-size arg size of journal objects [4K <= size <= 64M]
  --journal-pool arg        pool for journal objects

Feature overview

(1) layering: supports layered image snapshots, used for snapshots and copy-on-write. A snapshot of an image can be created and protected, and new images can then be cloned from it; parent and child images use COW and share object data (see the sketch after this list).
(2) striping: supports striping v2, similar to RAID 0, except that in Ceph the data is spread across different objects; it can improve performance for workloads with a lot of sequential reads and writes.
(3) exclusive-lock: supports an exclusive lock, restricting an image to being used by one client at a time.
(4) object-map: supports an object map (depends on exclusive-lock), which speeds up data import/export and used-space accounting; when enabled, a bitmap of all of the image's objects is kept to record whether each object actually exists, which can accelerate I/O in some scenarios.
(5) fast-diff: quickly computes the difference between the image and its snapshots (depends on object-map).
(6) deep-flatten: supports flattening snapshots, used to resolve snapshot dependencies during snapshot management.
(7) journaling: records all modifications in a journal so data can be recovered by replaying it (depends on exclusive-lock); enabling it increases disk I/O.
(8) Features enabled by default since Jewel: layering, exclusive-lock, object-map, fast-diff, deep-flatten.
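To illustrate feature (1), here is a hedged sketch of the snapshot/protect/clone workflow that layering enables (the snapshot and clone names are made up for the example):

#create and protect a snapshot, clone a COW child image from it, then flatten the
#clone so it no longer depends on its parent
rbd snap create myrbd1/mying2@clone-base
rbd snap protect myrbd1/mying2@clone-base
rbd clone myrbd1/mying2@clone-base myrbd1/mying2-clone
rbd flatten myrbd1/mying2-clone
rbd snap unprotect myrbd1/mying2@clone-base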

4) Enabling image features

cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying2 info
rbd image 'mying2':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1458dabfc2f1
    block_name_prefix: rbd_data.1458dabfc2f1
    format: 2
    features: layering
    op_features: 
    flags: 
    create_timestamp: Fri Aug 20 22:11:30 2021
    access_timestamp: Fri Aug 20 22:11:30 2021
    modify_timestamp: Fri Aug 20 22:11:30 2021


#enable specific features on an image in the given pool:
cephadmin@ceph-deploy:/etc/ceph$ rbd feature enable exclusive-lock --pool myrbd1 --image mying2
cephadmin@ceph-deploy:/etc/ceph$ rbd feature enable object-map --pool myrbd1 --image mying2
cephadmin@ceph-deploy:/etc/ceph$ rbd feature enable fast-diff --pool myrbd1 --image mying2

#verify
cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying2 info
rbd image 'mying2':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1458dabfc2f1
    block_name_prefix: rbd_data.1458dabfc2f1
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff  #the features are now enabled
    op_features: 
    flags: object map invalid, fast diff invalid
    create_timestamp: Fri Aug 20 22:11:30 2021
    access_timestamp: Fri Aug 20 22:11:30 2021
    modify_timestamp: Fri Aug 20 22:11:30 2021
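The "object map invalid" and "fast diff invalid" flags appear because the features were enabled after data had already been written; the map can usually be rebuilt so the flags clear, for example:

#rebuild the object map, then check the flags again
rbd object-map rebuild myrbd1/mying2
rbd --pool myrbd1 --image mying2 info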

5) Disabling image features

#disable specific features on an image in the given pool
cephadmin@ceph-deploy:/etc/ceph$ rbd feature disable fast-diff --pool myrbd1 --image mying2

cephadmin@ceph-deploy:/etc/ceph$ rbd --pool myrbd1 --image mying2 info
rbd image 'mying2':
    size 3 GiB in 768 objects
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: 1458dabfc2f1
    block_name_prefix: rbd_data.1458dabfc2f1
    format: 2
    features: layering, exclusive-lock #the fast-diff feature is now disabled
    op_features: 
    flags: 
    create_timestamp: Fri Aug 20 22:11:30 2021
    access_timestamp: Fri Aug 20 22:11:30 2021
    modify_timestamp: Fri Aug 20 22:11:30 2021

6. Unmapping the RBD image on the client

root@client:~#umount /mnt
root@client:~#rbd --pool myrbd1 unmap mying3

7. Permanently deleting an RBD image

Once an image is deleted, its data is deleted as well and cannot be recovered, so be careful when performing delete operations.

cephadmin@ceph-deploy:/etc/ceph$ rbd help rm
usage: rbd rm [--pool <pool>] [--namespace <namespace>] [--image <image>] 
              [--no-progress] 
              <image-spec> 

Delete an image.

Positional arguments
  <image-spec>         image specification
                       (example: [<pool-name>/[<namespace>/]]<image-name>)

Optional arguments
  -p [ --pool ] arg    pool name
  --namespace arg      namespace name
  --image arg          image name
  --no-progress        disable progress output
#delete the mying1 image from the myrbd1 pool
cephadmin@ceph-deploy:/etc/ceph$ rbd rm --pool myrbd1 --image mying1
Removing image: 100% complete...done.
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 
mying2
mying3

8. The RBD image trash mechanism

Data from a deleted image cannot be recovered, but there is another option: move the image to the trash first, and only delete it from the trash later once the removal is confirmed.

cephadmin@ceph-deploy:/etc/ceph$rbd trash --help
status Show the status of this image.
trash list (trash ls) List trash images.
trash move (trash mv) Move an image to the trash.
trash purge Remove all expired images from trash.
trash remove (trash rm) Remove an image from trash.
trash restore Restore an image from trash.
#check the images' status
cephadmin@ceph-deploy:/etc/ceph$ rbd status --pool=myrbd1 --image=mying3
Watchers:
    watcher=172.168.32.111:0/80535927 client.14665 cookie=18446462598732840962

cephadmin@ceph-deploy:/etc/ceph$ rbd status --pool=myrbd1 --image=mying2
Watchers:
    watcher=172.168.32.111:0/1284154910 client.24764 cookie=18446462598732840961

#move mying2 to the trash
cephadmin@ceph-deploy:/etc/ceph$ rbd trash move --pool myrbd1 --image mying2
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1
mying3

#list the images in the trash
cephadmin@ceph-deploy:/etc/ceph$ rbd trash list --pool myrbd1
1458dabfc2f1 mying2  #1458dabfc2f1 is the image ID; it is needed to restore the image

#restore the image from the trash
cephadmin@ceph-deploy:/etc/ceph$ rbd trash restore --pool myrbd1 --image mying2 --image-id 1458dabfc2f1
cephadmin@ceph-deploy:/etc/ceph$ rbd ls --pool myrbd1 -l
NAME    SIZE   PARENT  FMT  PROT  LOCK
mying2  3 GiB            2            
mying3  8 GiB            2 

#permanently delete an image from the trash
#if the image is no longer needed, trash remove deletes it from the trash for good
cephadmin@ceph-deploy:/etc/ceph$rbd trash remove --pool myrbd1 --image-id 1458dabfc2f1

9. RBD image snapshots

1) Snapshot commands

 
cephadmin@ceph-deploy:/etc/ceph$rbd help snap
snap create (snap add) #create a snapshot
snap limit clear #remove the limit on the number of snapshots of an image
snap limit set #set an upper limit on the number of snapshots of an image
snap list (snap ls) #list snapshots
snap protect #protect a snapshot from deletion
snap purge #delete all unprotected snapshots
snap remove (snap rm) #delete a snapshot
snap rename #rename a snapshot
snap rollback (snap revert) #roll the image back to a snapshot
snap unprotect #allow a snapshot to be deleted (remove its protection)

2) Create a snapshot

#check the current data on the client
root@client:~# ll -h /mnt
total 201M
drwxr-xr-x  3 root root 4.0K Aug 23 13:06 ./
drwxr-xr-x 22 root root  326 Aug 17 09:56 ../
drwx------  2 root root  16K Aug 23 13:04 lost+found/
-rw-r--r--  1 root root 200M Aug 23 13:06 rbd-test

#create the snapshot on ceph-deploy
cephadmin@ceph-deploy:/etc/ceph$ rbd help snap create
usage: rbd snap create [--pool <pool>] [--namespace <namespace>] 
                       [--image <image>] [--snap <snap>] [--skip-quiesce] 
                       [--ignore-quiesce-error] [--no-progress] 
                       <snap-spec> 

cephadmin@ceph-deploy:/etc/ceph$ rbd snap create --pool myrbd1 --image mying3 --snap mying3-snap-20210823
Creating snap: 100% complete...done.
#verify the snapshot
cephadmin@ceph-deploy:/etc/ceph$ rbd snap list --pool myrbd1 --image mying3
SNAPID  NAME                  SIZE   PROTECTED  TIMESTAMP               
     4  mying3-snap-20210823  8 GiB             Mon Aug 23 14:01:30 2021

3) Delete the data on the client and restore it from the snapshot

#delete the data on the client and unmap the rbd device
root@client:~# rm -rf /mnt/rbd-test 
root@client:~# ll /mnt
total 20
drwxr-xr-x  3 root root  4096 Aug 23 14:03 ./
drwxr-xr-x 22 root root   326 Aug 17 09:56 ../
drwx------  2 root root 16384 Aug 23 13:04 lost+found/
root@client:~# umount /mnt
root@client:~# rbd --pool myrbd1 unmap --image mying3

#restore the data from the snapshot
root@client:~# rbd help snap rollback 
usage: rbd snap rollback [--pool <pool>] [--namespace <namespace>] 
                         [--image <image>] [--snap <snap>] [--no-progress] 
                         <snap-spec> 
#roll back to the snapshot
cephadmin@ceph-deploy:/etc/ceph$ sudo rbd snap rollback --pool myrbd1 --image mying3 --snap mying3-snap-20210823
Rolling back to snapshot: 100% complete...done.
#verify on the client
root@client:~# rbd --pool myrbd1 map mying3
/dev/rbd1
root@client:~# mount /dev/rbd1 /mnt
root@client:~# ll -h /mnt
total 201M
drwxr-xr-x  3 root root 4.0K Aug 23 13:06 ./
drwxr-xr-x 22 root root  326 Aug 17 09:56 ../
drwx------  2 root root  16K Aug 23 13:04 lost+found/
-rw-r--r--  1 root root 200M Aug 23 13:06 rbd-test
#the data was restored successfully

4) Delete a specific snapshot

cephadmin@ceph-deploy:/etc/ceph$ rbd snap list --pool myrbd1 --image mying3
SNAPID  NAME                  SIZE   PROTECTED  TIMESTAMP               
     4  mying3-snap-20210823  8 GiB             Mon Aug 23 14:01:30 2021
     
cephadmin@ceph-deploy:/etc/ceph$ rbd snap rm --pool myrbd1 --image mying3 --snap mying3-snap-20210823
Removing snap: 100% complete...done.

cephadmin@ceph-deploy:/etc/ceph$ rbd snap list --pool myrbd1 --image mying3
cephadmin@ceph-deploy:/etc/ceph$ 

5) Limit the number of snapshots

#set and change the snapshot count limit
cephadmin@ceph-deploy:/etc/ceph$ rbd snap limit set --pool myrbd1 --image mying3 --limit 30
cephadmin@ceph-deploy:/etc/ceph$ rbd snap limit set --pool myrbd1 --image mying3 --limit 20
cephadmin@ceph-deploy:/etc/ceph$ rbd snap limit set --pool myrbd1 --image mying3 --limit 15
#remove the snapshot count limit
cephadmin@ceph-deploy:/etc/ceph$ rbd snap limit clear --pool myrbd1 --image mying3

 

II. CephFS

1. Introduction to CephFS

CephFS (the Ceph File System) provides shared file system storage: clients mount it over the Ceph protocol and use the Ceph cluster as the storage backend. CephFS requires the Metadata Server (MDS) service, whose daemon is ceph-mds; ceph-mds manages the metadata of the files stored on CephFS and coordinates access to the Ceph storage cluster.

CephFS metadata uses dynamic subtree partitioning: the metadata namespace is divided across different MDS daemons, and metadata is written to different active MDS servers according to its name, somewhat like the hashed cache directory hierarchy in nginx.
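The mycephfs filesystem queried in the next step must already exist. A hedged sketch of how such a filesystem is typically created (the pool names and PG counts here are assumptions, based only on the cephfs-data pool referenced in the user's caps below):

#create the metadata and data pools, then create the filesystem served by the MDS daemons
ceph osd pool create cephfs-metadata 32 32
ceph osd pool create cephfs-data 64 64
ceph fs new mycephfs cephfs-metadata cephfs-data
ceph fs status mycephfs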

 

2. Deploying a CephFS client

1) Check the CephFS MDS status

cephadmin@ceph-deploy:~/ceph-cluster$ ceph mds stat
mycephfs:2 {0=ceph-mds01=up:active,1=ceph-mgr02=up:active} 2 up:standby

 

2) Create a regular client user on the Ceph admin node (ceph-deploy)

 
#create the regular user kingseal
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph auth add client.kingseal mon 'allow r' mds "allow rw" osd "allow rwx pool=cephfs-data"
added key for client.kingseal

#verify the user
cephadmin@ceph-deploy:~/ceph-cluster$ sudo ceph auth get client.kingseal
[client.kingseal]
	key = AQCDWSZhj1yTJRAASUnXfwFkKab1CrIJV/5uDw==
	caps mds = "allow rw"
	caps mon = "allow r"
	caps osd = "allow rwx pool=cephfs-data"
exported keyring for client.kingseal

#create an empty keyring for kingseal
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-authtool --create-keyring ceph.client.kingseal.keyring
creating ceph.client.kingseal.keyring

#export the user's credentials into the keyring file
cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth get client.kingseal -o ceph.client.kingseal.keyring
exported keyring for client.kingseal

#create a key file (secret only) to be used when mounting cephfs
cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth print-key client.kingseal
AQCDWSZhj1yTJRAASUnXfwFkKab1CrIJV/5uDw==
#save it to a file for the client, e.g.: ceph auth print-key client.kingseal > kingseal.key

  

3) Copy the user's credentials to the client

cephadmin@ceph-deploy:~/ceph-cluster$scp ceph.conf ceph.client.kingseal.keyring kingseal.key 172.168.32.111:/etc/ceph

  

4) Verify the permissions on the client

root@client:~# ceph --user kingseal -s
  cluster:
    id:     fdefc83b-1ef9-4986-a4c1-8af7603f43bf
    health: HEALTH_WARN
            clock skew detected on mon.ceph-mon03
 
  services:
    mon: 3 daemons, quorum ceph-mon01,ceph-mon02,ceph-mon03 (age 37m)
    mgr: ceph-mgr01(active, since 56m), standbys: ceph-mgr02
    mds: 2/2 daemons up, 2 standby
    osd: 16 osds: 16 up (since 41m), 16 in (since 41m)
 
  data:
    volumes: 1/1 healthy
    pools:   3 pools, 97 pgs
    objects: 41 objects, 3.6 KiB
    usage:   104 MiB used, 800 GiB / 800 GiB avail
    pgs:     97 active+clean

  

5) Mount CephFS

A client can mount CephFS in two ways: in kernel space or in user space. A kernel-space mount requires the kernel to support the ceph module; a user-space mount requires ceph-fuse to be installed.

(1) Mount on the client using the key file

#create the mount point /cephfs-key
root@client:~# mkdir /cephfs-key
#mount cephfs
root@client:~# mount -t ceph 172.168.32.104:6789,172.168.32.105:6789,172.168.32.106:6789:/ /cephfs-key -o name=kingseal,secretfile=/etc/ceph/kingseal.key

#note: 172.168.32.104:6789,172.168.32.105:6789,172.168.32.106:6789 are the ceph-mon addresses and ports

  

(2) Mount on the client by passing the key directly

root@client:~# cat /etc/ceph/kingseal.key 
AQCDWSZhj1yTJRAASUnXfwFkKab1CrIJV/5uDw==

root@client:~# mount -t ceph 172.168.32.104:6789,172.168.32.105:6789,172.168.32.106:6789:/ /cephfs-key -o name=kingseal,secret=AQCDWSZhj1yTJRAASUnXfwFkKab1CrIJV/5uDw==

  

Client kernel module

The client kernel loads the ceph.ko module in order to mount the cephfs filesystem.

root@client:~# lsmod | grep ceph
ceph                  376832  1
libceph               315392  1 ceph
fscache                65536  1 ceph
libcrc32c              16384  3 xfs,raid456,libceph
root@client:~# modinfo ceph
filename:       /lib/modules/4.15.0-112-generic/kernel/fs/ceph/ceph.ko
license:        GPL
description:    Ceph filesystem for Linux
author:         Patience Warnick <patience@newdream.net>
author:         Yehuda Sadeh <yehuda@hq.newdream.net>
author:         Sage Weil <sage@newdream.net>
alias:          fs-ceph
srcversion:     B2806F4EAACAC1E19EE7AFA
depends:        libceph,fscache
retpoline:      Y
intree:         Y
name:           ceph
vermagic:       4.15.0-112-generic SMP mod_unload 
signat:         PKCS#7
signer:         
sig_key:        
sig_hashalgo:   md4

  

(3) User-space mount (ceph-fuse)

If the kernel version is too old and has no ceph module, ceph-fuse can be installed and used to mount instead, but the kernel-module mount is recommended.

#install ceph-fuse
apt install ceph-fuse
#create the mount point ceph-fsus
mkdir ceph-fsus
#mount with ceph-fuse
root@client:~# ll /etc/ceph/
total 32
drwxr-xr-x   2 root root  126 Aug 25 23:07 ./
drwxr-xr-x 100 root root 8192 Aug 25 23:07 ../
-rw-------   1 root root  151 Aug 25 22:49 ceph.client.admin.keyring
-rw-------   1 root root  152 Aug 25 23:02 ceph.client.kingseal.keyring
-rw-r--r--   1 root root  516 Aug 25 23:02 ceph.conf
-rw-r--r--   1 root root   40 Aug 25 23:02 kingseal.key
-rw-r--r--   1 root root   92 Jun  7 22:39 rbdmap
#client.kingseal corresponds to ceph.client.kingseal.keyring
root@client:~# ceph-fuse --name client.kingseal -m 172.168.32.104:6789,172.168.32.105:6789,172.168.32.106:6789 ceph-fsus/

ceph-fuse[5569]: starting ceph client
2021-08-25T23:28:07.140+0800 7f4feeea9100 -1 init, newargv = 0x55a107cc4920 newargc=15
ceph-fuse[5569]: starting fuse

  

(4) Verify

root@client:~# df -Th
......
172.168.32.104:6789,172.168.32.105:6789,172.168.32.106:6789:/ ceph            254G     0  254G   0% /cephfs-key
ceph-fuse                                                     fuse.ceph-fuse  254G     0  254G   0% /root/ceph-fsus

  

6) Mount at boot

(1) fstab

cat /etc/fstab
172.168.32.104:6789,172.168.32.105:6789,172.168.32.106:6789:/ /cephfs-key ceph defaults,name=kingseal,secretfile=/etc/ceph/kingseal.key,_netdev 0 0
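For the ceph-fuse mount, a corresponding fstab entry can be used instead; a sketch, assuming the mount point used above:

none /root/ceph-fsus fuse.ceph ceph.id=kingseal,ceph.conf=/etc/ceph/ceph.conf,_netdev,defaults 0 0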

  

(2) rc.local

chmod +x /etc/rc.local

cat /etc/rc.local
mount -t ceph 172.168.32.104:6789,172.168.32.105:6789,172.168.32.106:6789:/ /cephfs-key -o name=kingseal,secretfile=/etc/ceph/kingseal.key

  

III. Using the RadosGW Object Store

1. RadosGW overview

RadosGW is one implementation of an object storage service (OSS). The RADOS gateway, also called the Ceph Object Gateway, RADOSGW, or RGW, is a service that lets clients access a Ceph cluster through standard object-storage APIs; it supports the AWS S3 and Swift APIs. RGW runs on top of librados. Since Ceph 0.80 it has used the embedded Civetweb web server to answer API requests (nginx or apache can be used instead). Clients talk to RGW over a RESTful API on HTTP/HTTPS, while RGW talks to the Ceph cluster through librados. An RGW client authenticates as an RGW user via the S3 or Swift API, and the gateway then authenticates to the Ceph cluster on the user's behalf using cephx.

S3 was launched by Amazon in 2006; its full name is Simple Storage Service. S3 defined object storage and is its de facto standard. In a sense, S3 is object storage and object storage is S3: it dominates the object storage market, and later object stores all imitate S3.

  

2. Characteristics of object storage

1) Data is stored as objects, and each object contains not only the data itself but also the data's metadata.
2) Objects are retrieved by Object ID. They cannot be accessed directly the way a normal file system uses a path and file name; they can only be accessed through the API or through a third-party client (which is itself a wrapper around the API).
3) Objects are not organized into a directory tree; they are stored in a flat namespace. Amazon S3 calls this flat namespace a bucket, while Swift calls it a container.
4) Neither buckets nor containers can be nested.
5) A bucket must be authorized before it can be accessed; one account can be granted access to multiple buckets, and the permissions can differ per bucket.
6) Easy to scale out and fast at retrieving data.
7) Does not support client-side mounting, and the client must specify the object name when accessing it.
8) Not well suited to scenarios where files are modified or deleted very frequently.

  

Ceph uses buckets as storage containers (storage spaces) to store object data and to isolate users from one another. Data is stored in buckets, and user permissions are also granted per bucket; different users can be given different permissions on different buckets, which is how access control is implemented.

Bucket characteristics:

1) A bucket is the container used to store objects, and every object must belong to a bucket. Bucket attributes can be set and modified to control things such as region, access permissions, and lifecycle; these settings apply to all objects in the bucket, so different buckets can be created to serve different management purposes.
2) The inside of a bucket is flat: there is no file-system notion of directories, and all objects belong directly to their bucket.
3) Each user can own multiple buckets.
4) A bucket name must be globally unique within the OSS and cannot be changed after creation.
5) There is no limit on the number of objects inside a bucket.

  

Bucket naming rules:

 
1) Only lowercase letters, digits, and hyphens (-) are allowed.
2) The name must start and end with a lowercase letter or a digit.
3) The length must be between 3 and 63 bytes.

RadosGW architecture diagram (figure)

RadosGW logical diagram (figure)

3. Comparison of object storage access models

1) Amazon S3: provides user, bucket, and object to represent the user, the storage bucket, and the object. A bucket belongs to a user; per-user access permissions can be set on different buckets' namespaces, and different users are allowed to access the same bucket.
2) OpenStack Swift: provides user, container, and object, corresponding to the user, the bucket, and the object, but it adds a parent component for users called account, which represents a project or tenant. An account can contain one or more users, who share the same set of containers, and the account provides the namespace for those containers.
3) RadosGW: provides user, subuser, bucket, and object. Its user corresponds to the S3 user, while subuser corresponds to the Swift user; neither user nor subuser provides a namespace for buckets, so buckets belonging to different users cannot share the same name. Since the Jewel release, RadosGW has an optional tenant component that provides a namespace for users and buckets. RadosGW uses ACLs to grant different permissions to different users, for example:
Read: read permission
Write: write permission
Readwrite: read and write permission
full-control: full control

  

4. RadosGW service configuration

RadosGW is deployed on ceph-mgr01 and ceph-mgr02.

4.1 RadosGW high-availability architecture

 

4.2 Customizing the RadosGW port

The configuration file can be modified on the ceph-deploy server and then pushed to all nodes, or each radosgw server's configuration can be changed individually to the same settings.

Change the default port 7480 to 8080. Configure it on ceph-deploy and push it to all other nodes.

Edit the ceph.conf file on ceph-deploy

cephadmin@ceph-deploy:~/ceph-cluster$cat ceph.conf
[global]
fsid = c31ea2e3-47f7-4247-9d12-c0bf8f1dfbfb
public_network = 172.168.0.0/16
cluster_network = 10.0.0.0/16
mon_initial_members = ceph-mon01
mon_host = 172.168.32.104
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx


[mds.ceph-mds02]
#mds_standby_for_fscid = mycephfs
mds_standby_for_name = ceph-mds01
mds_standby_replay = true
[mds.ceph-mds03]
mds_standby_for_name = ceph-mgr02
mds_standby_replay = true

#add the following; the part after client.rgw. is the host name
[client.rgw.ceph-mgr02]
rgw_host = ceph-mgr02
rgw_frontends = civetweb port=8080
[client.rgw.ceph-mgr01]
rgw_host = ceph-mgr01
rgw_frontends = civetweb port=8080                                      

Push the configuration file to the other nodes in the cluster

# after the push, /etc/ceph/ceph.conf on all nodes will be identical to the file above
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf config push ceph-mgr{01..02}
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf config push ceph-mon{01..03}
cephadmin@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf config push ceph-node{01..03}

Before the restart, radosgw listens on port 7480

root@ceph-mgr02:~# ss -antlp|grep 7480
LISTEN   0         128                  0.0.0.0:7480             0.0.0.0:*       users:(("radosgw",pid=13832,fd=74))                                            
LISTEN   0         128                     [::]:7480                [::]:*       users:(("radosgw",pid=13832,fd=75))  

  

Restart the radosgw service

 
root@ceph-mgr01:~# ps -ef|grep radosgw
ceph     13551     1  0 15:19 ?        00:00:58 /usr/bin/radosgw -f --cluster ceph --name client.rgw.ceph-mgr01 --setuser ceph --setgroup ceph

root@ceph-mgr02:~# ps -ef|grep radosgw
ceph     13832     1  0 15:19 ?        00:00:55 /usr/bin/radosgw -f --cluster ceph --name client.rgw.ceph-mgr02 --setuser ceph --setgroup ceph


root@ceph-mgr01:~# systemctl restart ceph-radosgw@rgw.ceph-mgr01
root@ceph-mgr02:~# systemctl restart ceph-radosgw@rgw.ceph-mgr02
#radosgw now listens on port 8080
root@ceph-mgr02:~# ss -antlp|grep 8080
LISTEN   0         128                  0.0.0.0:8080             0.0.0.0:*       users:(("radosgw",pid=15954,fd=69))       

Verification

cephadmin@ceph-deploy:~/ceph-cluster$ curl 172.168.32.102:8080
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>

cephadmin@ceph-deploy:~/ceph-cluster$ curl 172.168.32.103:8080
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>

  

4.3 Enabling SSL for RadosGW

Configured only on ceph-mgr01.

Create a self-signed certificate on ceph-mgr01

openssl req -newkey rsa:4096 -nodes -sha256 -keyout ca.key -x509 -days 3650 -out ca.crt
openssl req -newkey rsa:4096 -nodes -sha256 -keyout civetweb.key -out civetweb.csr
openssl x509 -req -days 3650 -in civetweb.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out civetweb.crt
#verify the certificate files
root@ceph-mgr01:/etc/ceph/certs# ll
total 24
drwxr-xr-x 2 root root  108 Aug 30 23:34 ./
drwxr-xr-x 3 root root  117 Aug 30 23:23 ../
-rw-r--r-- 1 root root 2106 Aug 30 23:32 ca.crt
-rw------- 1 root root 3268 Aug 30 23:31 ca.key
-rw-r--r-- 1 root root   41 Aug 30 23:34 ca.srl
-rw-r--r-- 1 root root 1996 Aug 30 23:34 civetweb.crt
-rw-r--r-- 1 root root 1744 Aug 30 23:33 civetweb.csr
-rw------- 1 root root 3272 Aug 30 23:32 civetweb.key
#combine the key and certificate into civetweb.pem
root@ceph-mgr01:/etc/ceph/certs# cat civetweb.key civetweb.crt > civetweb.pem
root@ceph-mgr01:/etc/ceph/certs# tree
.
├── ca.crt
├── ca.key
├── ca.srl
├── civetweb.crt
├── civetweb.csr
├── civetweb.key
└── civetweb.pem

  

Edit the ceph.conf file on ceph-mgr01

Note: in a production setup both ceph-mgr01 and ceph-mgr02 should be configured, and ceph.conf must be identical on all nodes.

root@ceph-mgr01:/etc/ceph/certs# vim /etc/ceph/ceph.conf
[global]
fsid = c31ea2e3-47f7-4247-9d12-c0bf8f1dfbfb
public_network = 172.168.0.0/16
cluster_network = 10.0.0.0/16
mon_initial_members = ceph-mon01
mon_host = 172.168.32.104
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx


[mds.ceph-mds02]
#mds_standby_for_fscid = mycephfs
mds_standby_for_name = ceph-mds01
mds_standby_replay = true
[mds.ceph-mds03]
mds_standby_for_name = ceph-mgr02
mds_standby_replay = true


[client.rgw.ceph-mgr02]
rgw_host = ceph-mgr02
rgw_frontends = civetweb port=8080

[client.rgw.ceph-mgr01]
rgw_host = ceph-mgr01
#ssl configuration
rgw_frontends = "civetweb port=8080+8443s ssl_certificate=/etc/ceph/certs/civetweb.pem"

Restart radosgw on ceph-mgr01

root@ceph-mgr01:/etc/ceph/certs# ps -ef|grep radosgw
ceph     17393     1  0 22:19 ?        00:00:12 /usr/bin/radosgw -f --cluster ceph --name client.rgw.ceph-mgr01 --setuser ceph --setgroup ceph
root     18454 18265  0 23:40 pts/0    00:00:00 grep --color=auto radosgw
root@ceph-mgr01:/etc/ceph/certs# systemctl restart ceph-radosgw@rgw.ceph-mgr01.service

Verify the port on ceph-mgr01

root@ceph-mgr01:/etc/ceph/certs# ss -antlp|grep 8443
LISTEN   0         128                  0.0.0.0:8443             0.0.0.0:*       users:(("radosgw",pid=18459,fd=72))   

Verification

cephadmin@ceph-deploy:~/ceph-cluster$ curl http://172.168.32.102:8080
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>

cephadmin@ceph-deploy:~/ceph-cluster$ curl -k https://172.168.32.102:8443
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>

  

4.4 RadosGW tuning

#create the log directory
root@ceph-mgr01:/etc/ceph/certs# mkdir /var/log/radosgw
root@ceph-mgr01:/etc/ceph/certs# chown ceph.ceph /var/log/radosgw

#edit the ceph.conf configuration on ceph-mgr01
[global]
fsid = c31ea2e3-47f7-4247-9d12-c0bf8f1dfbfb
public_network = 172.168.0.0/16
cluster_network = 10.0.0.0/16
mon_initial_members = ceph-mon01
mon_host = 172.168.32.104
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx


[mds.ceph-mds02]
#mds_standby_for_fscid = mycephfs
mds_standby_for_name = ceph-mds01
mds_standby_replay = true
[mds.ceph-mds03]
mds_standby_for_name = ceph-mgr02
mds_standby_replay = true


[client.rgw.ceph-mgr02]
rgw_host = ceph-mgr02
rgw_frontends = civetweb port=8080

[client.rgw.ceph-mgr01]
rgw_host = ceph-mgr01
#configure the log files, request timeout, and number of threads
rgw_frontends = "civetweb port=8080+8443s ssl_certificate=/etc/ceph/certs/civetweb.pem request_timeout_ms=3000 error_log_file=/var/log/radosgw/civetweb.error.log access_log_file=/var/log/radosgw/civetweb.access.log num_threads=100"

Restart radosgw on ceph-mgr01

root@ceph-mgr01:/etc/ceph/certs# ps -ef|grep radosgw
ceph     18459     1  0 23:41 ?        00:00:01 /usr/bin/radosgw -f --cluster ceph --name client.rgw.ceph-mgr01 --setuser ceph --setgroup ceph

root@ceph-mgr01:/etc/ceph/certs# systemctl restart ceph-radosgw@rgw.ceph-mgr01.service

  

Access test:

 
cephadmin@ceph-deploy:~/ceph-cluster$ curl -k https://172.168.32.102:8443
cephadmin@ceph-deploy:~/ceph-cluster$ curl -k https://172.168.32.102:8443
cephadmin@ceph-deploy:~/ceph-cluster$ curl -k https://172.168.32.102:8443
#check the access log on ceph-mgr01
root@ceph-mgr01:/etc/ceph/certs# tail -10 /var/log/radosgw/civetweb.access.log 
172.168.32.101 - - [30/Aug/2021:23:54:17 +0800] "GET / HTTP/1.1" 200 413 - curl/7.58.0
172.168.32.101 - - [30/Aug/2021:23:54:18 +0800] "GET / HTTP/1.1" 200 413 - curl/7.58.0
172.168.32.101 - - [30/Aug/2021:23:54:18 +0800] "GET / HTTP/1.1" 200 413 - curl/7.58.0
172.168.32.101 - - [30/Aug/2021:23:54:19 +0800] "GET / HTTP/1.1" 200 413 - curl/7.58.0

  

5. Accessing the object store with the S3 API

5.1) Create a RadosGW user

Create the radosgwadmin account on ceph-deploy

 
cephadmin@ceph-deploy:~/ceph-cluster$ radosgw-admin user create --uid=radosgwadmin --display-name='radosgwadmin'
{
    "user_id": "radosgwadmin",
    "display_name": "radosgwadmin",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [],
    "keys": [
        {
            "user": "radosgwadmin",
            "access_key": "N4CFRDJ2O503H3QMA30Y",
            "secret_key": "68Ude2uJmCMRHhV9HpABujFMtcd9ZS2NVsBz6RG8"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}

#the account information can be viewed later with the following command
root@ceph-mgr01:/etc/ceph/certs# radosgw-admin user info --uid=radosgwadmin --display-name='radosgwadmin'

  

5.2) Install s3cmd on the client

s3cmd is a command-line client that accesses the Ceph RGW to create buckets and to upload, download, and manage data in the object store.

root@client:~# apt install -y s3cmd

  

5.3) Generate the s3cmd configuration on the client

root@client:~# s3cmd --configure
Enter new values or accept defaults in brackets with Enter.
Refer to user manual for detailed description of all options.

Access key and Secret key are your identifiers for Amazon S3. Leave them empty for using the env variables.
Access Key: N4CFRDJ2O503H3QMA30Y                                        # paste the Access Key generated on the server
Secret Key: 68Ude2uJmCMRHhV9HpABujFMtcd9ZS2NVsBz6RG8                    # paste the Secret Key generated on the server
Default Region [US]:                                                    # press Enter to accept the default

Use "s3.amazonaws.com" for S3 Endpoint and not modify it to the target Amazon S3.
S3 Endpoint [s3.amazonaws.com]: 172.168.32.102:8080             # enter the object store address; it can be a domain name or the radosgw VIP

Use "%(bucket)s.s3.amazonaws.com" to the target Amazon S3. "%(bucket)s" and "%(location)s" vars can be used
if the target S3 system supports dns based buckets.
DNS-style bucket+hostname:port template for accessing a bucket [%(bucket)s.s3.amazonaws.com]:  172.168.32.102:8080/%(bucket)            # enter the bucket address of the object store

Encryption password is used to protect your files from reading
by unauthorized persons while in transfer to S3
Encryption password:                                                     # leave the password empty and press Enter
Path to GPG program [/usr/bin/gpg]:                                      # path to the gpg binary, press Enter

When using secure HTTPS protocol all communication with Amazon S3
servers is protected from 3rd party eavesdropping. This method is
slower than plain HTTP, and can only be proxied with Python 2.7 or newer
Use HTTPS protocol [Yes]: no                                             # whether to use https; choose no

On some networks all internet access must go through a HTTP proxy.
Try setting it here if you can't connect to S3 directly
HTTP Proxy server name:                                                  # no proxy, leave empty and press Enter

New settings:
  Access Key: D028HA7T16KJHU2602YA
  Secret Key: RWczKVORMdDBw2mtgLs2dUPq2xrCehnjOtB6pHPY
  Default Region: US
  S3 Endpoint: 192.168.5.91
  DNS-style bucket+hostname:port template for accessing a bucket:  %(bucket).172.168.32.102
  Encryption password: 
  Path to GPG program: /usr/bin/gpg
  Use HTTPS protocol: False
  HTTP Proxy server name: 
  HTTP Proxy server port: 0

Test access with supplied credentials? [Y/n] y      #if the test passes you will be prompted to save
Please wait, attempting to list all buckets...
Success. Your access key and secret key worked fine :-)

Now verifying that encryption works...
Not configured. Never mind.

Save settings? [y/N] y                                                    # y to save the configuration file
Configuration saved to '/root/.s3cfg'                                     # the configuration is saved to /root/.s3cfg

  

5.4) Basic s3cmd operations

# create the bucket my-bucket
root@client:~# s3cmd mb s3://my-bucket
Bucket 's3://my-bucket/' created

# list all buckets
root@client:~# s3cmd ls
2021-08-30 16:42  s3://my-bucket

# upload /etc/hosts to the bucket
root@client:~# s3cmd put /etc/hosts s3://my-bucket
upload: '/etc/hosts' -> 's3://my-bucket/hosts'  [1 of 1]
186 of 186   100% in    1s    93.30 B/s  done

#download a file from the bucket to a local directory
root@client:~# s3cmd get s3://my-bucket/hosts /tmp/
download: 's3://my-bucket/hosts' -> '/tmp/hosts'  [1 of 1]
 186 of 186   100% in    0s     3.71 kB/s  done


# list the files in my-bucket
root@client:~# s3cmd ls s3://my-bucket
2021-08-30 16:43       186   s3://my-bucket/hosts

# delete the hosts file from my-bucket
root@client:~# s3cmd del s3://my-bucket/hosts
delete: 's3://my-bucket/hosts'
root@client:~# s3cmd ls s3://my-bucket
root@client:~# 

# delete my-bucket
root@client:~# s3cmd rb s3://my-bucket
Bucket 's3://my-bucket/' removed
root@client:~# s3cmd ls
root@client:~# 

#note: to modify data stored in a bucket, simply upload the modified file again
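To keep a local directory and a bucket in step, s3cmd also provides a sync command; a sketch (the directory and bucket names are illustrative):

# upload only changed files; --delete-removed also deletes objects whose local copy is gone
s3cmd sync ./backup/ s3://my-bucket/backup/ --delete-removed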

  

 

 

 

