Ceph learning notes:
Environment:
| IP address | Hostname |
|---|---|
| 192.168.118.14 | ceph-node1 |
| 192.168.118.15 | ceph-node2 |
| 192.168.118.16 | ceph-node3 |
| 192.168.118.17 | ceph-client |
Note: update all nodes to the latest kernel version! The RBD kernel client in older distribution kernels lacks support for newer image features and can fail to map images.
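One common way to do this on CentOS 7 is the ELRepo mainline kernel; a minimal sketch (assuming the standard ELRepo package URLs), to be run on every node:
# Import the ELRepo signing key and install the repository
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
# Install the mainline kernel and boot into it
yum --enablerepo=elrepo-kernel install -y kernel-ml
grub2-set-default 0   # assumes the new kernel is the first GRUB menu entry
reboot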
Creating RBD
Server-side operations
Create a pool
[root@ceph-node1 ~/mycluster]#ceph osd pool create rbd 64
pool 'rbd' created
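The 64 here is the placement-group count (pg_num); it should be a power of two scaled to the number of OSDs in the cluster. A quick sketch to verify it after creation:
ceph osd pool get rbd pg_num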
Create a client account
# Create the client user
[root@ceph-node1 ~/mycluster]#ceph auth get-or-create client.rbd mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=rbd'
# View the user and its capabilities
[root@ceph-node1 ~/mycluster]#ceph auth get client.rbd
exported keyring for client.rbd
[client.rbd]
key = AQB6OAhfMN4jFhAAPmO17m5Z5gP5YC11JOJcTA==
caps mon = "allow r"
caps osd = "allow class-read object_prefix rbd_children,allow rwx pool=rbd"
# Export the client keyring
[root@ceph-node1 ~/mycluster]#ceph auth get client.rbd -o ./ceph.client.rbd.keyring
exported keyring for client.rbd
Enable the RBD application on the pool
[root@ceph-node1 ~/mycluster]#ceph osd pool application enable rbd rbd
enabled application 'rbd' on pool 'rbd'
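To double-check that the pool is now tagged for RBD, something like:
ceph osd pool application get rbd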
Client-side operations
Install ceph-common
[root@ceph-client ~]#yum install ceph-common -y
Copy ceph.conf and the authentication keyring from ceph-node1
[root@ceph-node1 ~/mycluster]#scp ceph.conf ceph.client.rbd.keyring ceph-client:/etc/ceph/
[root@ceph-client ~]#ls /etc/ceph/
ceph.client.rbd.keyring ceph.conf rbdmap
# Check the cluster status as the newly created user rbd
[root@ceph-client ~]#ceph -s --user rbd
cluster:
id: 45757634-b5ec-4172-957d-80c5c9f76d52
health: HEALTH_OK
services:
mon: 3 daemons, quorum ceph-node1,ceph-node2,ceph-node3 (age 65m)
mgr: no daemons active
osd: 0 osds: 0 up, 0 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
Create an image
# Create images
[root@ceph-client ~]#rbd create rbd1 -p rbd --size 1G --user rbd
[root@ceph-client ~]#rbd create rbd/rbd2 --size 2G --user rbd
# List the created images
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 1 GiB 2
rbd2 2 GiB 2
# View in JSON format
[root@ceph-client ~]#rbd ls -p rbd -l --format json --user rbd --pretty-format
[
{
"image": "rbd1",
"size": 1073741824,
"format": 2
},
{
"image": "rbd2",
"size": 2147483648,
"format": 2
}
]
# Show detailed image information
[root@ceph-client ~]#rbd info rbd1 --user rbd
rbd image 'rbd1':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 112fe2290ad6
block_name_prefix: rbd_data.112fe2290ad6
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sat Jul 11 09:14:18 2020
access_timestamp: Sat Jul 11 09:14:18 2020
modify_timestamp: Sat Jul 11 09:14:18 2020
Disabling image features
By default an image carries the following features:
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
For kernel-mapped RBD use, generally only layering is needed; the kernel RBD client in many distribution kernels does not support the other features and may refuse to map the image, so disable them all.
# Disable image features
[root@ceph-client ~]#rbd feature disable rbd/rbd1 exclusive-lock, object-map, fast-diff, deep-flatten --user rbd
[root@ceph-client ~]#rbd feature disable rbd/rbd2 exclusive-lock, object-map, fast-diff, deep-flatten --user rbd
# View the details
[root@ceph-client ~]#rbd info rbd/rbd1 --user rbd
rbd image 'rbd1':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 112fe2290ad6
block_name_prefix: rbd_data.112fe2290ad6
format: 2
features: layering
op_features:
flags:
create_timestamp: Sat Jul 11 09:14:18 2020
access_timestamp: Sat Jul 11 09:14:18 2020
modify_timestamp: Sat Jul 11 09:14:18 2020
[root@ceph-client ~]#rbd info rbd/rbd2 --user rbd
rbd image 'rbd2':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 11342244e27f
block_name_prefix: rbd_data.11342244e27f
format: 2
features: layering
op_features:
flags:
create_timestamp: Sat Jul 11 09:14:47 2020
access_timestamp: Sat Jul 11 09:14:47 2020
modify_timestamp: Sat Jul 11 09:14:47 2020
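To avoid disabling features on every new image, the default feature set can also be limited in the configuration; a sketch, assuming rbd_default_features accepts feature names in this release (it also accepts the numeric bitmask, where layering = 1):
# /etc/ceph/ceph.conf on the client
[client]
rbd default features = layering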
Mounting the image on the client
[root@ceph-client ~]#lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 4.4G 0 rom /mnt/centos7
vda 252:0 0 100G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 7.9G 0 lvm [SWAP]
└─centos-home 253:2 0 41.1G 0 lvm /home
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 1 GiB 2
rbd2 2 GiB 2
# Map the RBD image to the client host
[root@ceph-client ~]#rbd map rbd/rbd1 --user rbd
/dev/rbd0
[root@ceph-client ~]#lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 4.4G 0 rom /mnt/centos7
vda 252:0 0 100G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 7.9G 0 lvm [SWAP]
└─centos-home 253:2 0 41.1G 0 lvm /home
rbd0 251:0 0 1G 0 disk
Initialize the filesystem
# Format the disk
[root@ceph-client ~]#mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=8, agsize=32768 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@ceph-client ~]#mkdir -pv /mnt/ceph-disk1
mkdir: created directory ‘/mnt/ceph-disk1’
# Mount the filesystem
[root@ceph-client ~]#mount /dev/rbd0 /mnt/ceph-disk1/
[root@ceph-client ~]#df -Th
Filesystem Type Size Used Avail Use% Mounted on
devtmpfs devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs tmpfs 3.9G 8.6M 3.9G 1% /run
tmpfs tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
/dev/mapper/centos-root xfs 50G 1.9G 49G 4% /
/dev/vda1 xfs 1014M 149M 866M 15% /boot
/dev/mapper/centos-home xfs 42G 33M 42G 1% /home
/dev/sr0 iso9660 4.4G 4.4G 0 100% /mnt/centos7
tmpfs tmpfs 783M 0 783M 0% /run/user/0
/dev/rbd0 xfs 1014M 33M 982M 4% /mnt/ceph-disk1
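The mapping and mount above do not survive a reboot. The rbdmap file seen in /etc/ceph/ earlier exists for exactly this; a sketch, assuming the rbdmap service shipped with ceph-common:
# /etc/ceph/rbdmap -- images to map at boot, format: pool/image id=user,keyring=path
rbd/rbd1 id=rbd,keyring=/etc/ceph/ceph.client.rbd.keyring
# /etc/fstab -- mount via the udev-created symlink; noauto lets the rbdmap service do the mounting
/dev/rbd/rbd/rbd1 /mnt/ceph-disk1 xfs noauto 0 0
# enable the boot-time service
systemctl enable rbdmap.service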
Unmounting the disk on the client
[root@ceph-client ~]#umount /dev/rbd0
# Show local image mappings
[root@ceph-client ~]#rbd showmapped --user rbd
id pool namespace image snap device
0 rbd rbd1 - /dev/rbd0
# Unmap the image
[root@ceph-client ~]#rbd unmap rbd/rbd1 --user rbd
[root@ceph-client ~]#lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 4.4G 0 rom /mnt/centos7
vda 252:0 0 100G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 7.9G 0 lvm [SWAP]
└─centos-home 253:2 0 41.1G 0 lvm /home
Resize (grow) an image
[root@ceph-client ~]#rbd resize -s 5G rbd/rbd1 --user rbd
Resizing image: 100% complete...done.
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 5 GiB 2
rbd2 2 GiB 2
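Growing the image only grows the block device; a mounted filesystem has to be grown separately. For the XFS filesystem created above, a sketch:
# map and mount the resized image, then expand XFS to fill the device
rbd map rbd/rbd1 --user rbd
mount /dev/rbd0 /mnt/ceph-disk1/
xfs_growfs /mnt/ceph-disk1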
Delete an image
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 5 GiB 2
rbd2 2 GiB 2
# Delete rbd2
[root@ceph-client ~]#rbd rm rbd2 --user rbd
Removing image: 100% complete...done.
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 5 GiB 2
Move an image to the trash
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 5 GiB 2
# Move rbd1 to the trash
[root@ceph-client ~]#rbd trash move rbd/rbd1 --user rbd
[root@ceph-client ~]#rbd ls -l --user rbd
# List the trash
[root@ceph-client ~]#rbd trash list -p rbd --user rbd
112fe2290ad6 rbd1
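The trash also supports deferred deletion and bulk cleanup; a sketch, assuming this rbd release has the --expires-at option (IMAGE is a placeholder image name):
# keep a trashed image until a given date, then purge everything that has expired
rbd trash move rbd/IMAGE --expires-at "2020-08-01" --user rbd
rbd trash purge -p rbd --user rbd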
Restore an image from the trash
[root@ceph-client ~]#rbd trash list -p rbd --user rbd
112fe2290ad6 rbd1
# Restore rbd1
[root@ceph-client ~]#rbd trash restore -p rbd --image rbd1 --image-id 112fe2290ad6 --user rbd
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 5 GiB 2
RBD snapshots
Preparation before taking a snapshot:
[root@ceph-client ~]#lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 4.4G 0 rom /mnt/centos7
vda 252:0 0 100G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 7.9G 0 lvm [SWAP]
└─centos-home 253:2 0 41.1G 0 lvm /home
[root@ceph-client ~]#rbd map rbd/rbd1 --user rbd
/dev/rbd0
[root@ceph-client ~]#lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 4.4G 0 rom /mnt/centos7
vda 252:0 0 100G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 7.9G 0 lvm [SWAP]
└─centos-home 253:2 0 41.1G 0 lvm /home
rbd0 251:0 0 5G 0 disk
[root@ceph-client ~]#mount /dev/rbd0 /mnt/ceph-disk1/
[root@ceph-client ~]#echo 'this test-1' >> /mnt/ceph-disk1/1.txt
[root@ceph-client ~]#echo 'this test-2' >> /mnt/ceph-disk1/2.txt
[root@ceph-client ~]#ls /mnt/ceph-disk1/
1.txt 2.txt
Create a snapshot
[root@ceph-client ~]#rbd snap create rbd/rbd1@snap1 --user rbd
[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 snap1 5 GiB Sat Jul 11 09:41:19 2020
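A snapshot taken while the filesystem is mounted is only crash-consistent, since dirty data may still be in flight. One option for a cleaner point-in-time image is to freeze the filesystem around the snapshot (a sketch using fsfreeze from util-linux):
fsfreeze -f /mnt/ceph-disk1    # flush and block new writes
rbd snap create rbd/rbd1@snap1 --user rbd
fsfreeze -u /mnt/ceph-disk1    # resume writes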
Roll back a snapshot
[root@ceph-client ~]#ls /mnt/ceph-disk1/
1.txt 2.txt
# To verify data correctness after the rollback, delete the 2.txt file first
[root@ceph-client ~]#rm -rf /mnt/ceph-disk1/2.txt
[root@ceph-client ~]#ls /mnt/ceph-disk1/
1.txt
# Unmount and unmap the image
[root@ceph-client ~]#umount /dev/rbd0
[root@ceph-client ~]#rbd unmap rbd/rbd1 --user rbd
[root@ceph-client ~]#lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 4.4G 0 rom /mnt/centos7
vda 252:0 0 100G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 7.9G 0 lvm [SWAP]
└─centos-home 253:2 0 41.1G 0 lvm /home
[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 snap1 5 GiB Sat Jul 11 09:41:19 2020
# Roll back the snapshot
[root@ceph-client ~]#rbd snap rollback rbd/rbd1@snap1 --user rbd
Rolling back to snapshot: 100% complete...done.
# Map the image
[root@ceph-client ~]#rbd map rbd/rbd1 --user rbd
/dev/rbd0
[root@ceph-client ~]#lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 4.4G 0 rom /mnt/centos7
vda 252:0 0 100G 0 disk
├─vda1 252:1 0 1G 0 part /boot
└─vda2 252:2 0 99G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 7.9G 0 lvm [SWAP]
└─centos-home 253:2 0 41.1G 0 lvm /home
rbd0 251:0 0 5G 0 disk
[root@ceph-client ~]#mount /dev/rbd0 /mnt/ceph-disk1/
# The data is back to the state captured by the snapshot
[root@ceph-client ~]#ls /mnt/ceph-disk1/
1.txt 2.txt
Delete a snapshot
[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 snap1 5 GiB Sat Jul 11 09:41:19 2020
[root@ceph-client ~]#rbd snap rm rbd/rbd1@snap1 --user rbd
Removing snap: 100% complete...done.
[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
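To drop all of an image's snapshots in one step instead of one at a time, rbd also has a purge subcommand; a sketch:
rbd snap purge rbd/rbd1 --user rbd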
Limit the number of snapshots
[root@ceph-client ~]#rbd snap limit set rbd/rbd1 --limit 10 --user rbd
Clear the snapshot limit
[root@ceph-client ~]#rbd snap limit clear rbd/rbd1 --user rbd
Clones
What we build here are snapshot-based clones, which use COW (copy-on-write), or more precisely "copy when written": when a clone is created from a snapshot, Ceph only records a logical mapping back to the source and allocates no physical space for the clone. Although the snapshot itself is read-only, a clone made from it is both readable and writable. Physical space is allocated only when you actually write to the clone; reads of regions that have never been written are served from the parent snapshot. A clone therefore behaves like a normal image, written to or not, but it depends on its snapshot: delete the snapshot and the clone is destroyed, which is why the snapshot must be protected.
Create a clone
# Before cloning, the snapshot must first be protected; the error below is what you get otherwise
[root@ceph-client ~]#rbd clone rbd/rbd1@snap1 rbd/rbd1-snap1-clone --user rbd
2020-07-11 10:05:48.783 7fb34f7fe700 -1 librbd::image::CloneRequest: 0x5647acbcf850 validate_parent: parent snapshot must be protected
rbd: clone error: (22) Invalid argument
# Protect the snapshot
[root@ceph-client ~]#rbd snap protect rbd/rbd1@snap1 --user rbd
[root@ceph-client ~]#rbd snap list rbd/rbd1 --user rbd
SNAPID NAME SIZE PROTECTED TIMESTAMP
8 snap1 5 GiB yes Sat Jul 11 10:04:34 2020
# Create the clone
[root@ceph-client ~]#rbd clone rbd/rbd1@snap1 rbd/rbd1-snap1-clone --user rbd
# View the clone
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 5 GiB 2
rbd1@snap1 5 GiB 2 yes
rbd1-snap1-clone 5 GiB rbd/rbd1@snap1 2
# View the clone's details
[root@ceph-client ~]#rbd info rbd1-snap1-clone --user rbd
rbd image 'rbd1-snap1-clone':
size 5 GiB in 1280 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 12613ea38941
block_name_prefix: rbd_data.12613ea38941
format: 2
features: layering
op_features:
flags:
create_timestamp: Sat Jul 11 10:07:20 2020
access_timestamp: Sat Jul 11 10:07:20 2020
modify_timestamp: Sat Jul 11 10:07:20 2020
parent: rbd/rbd1@snap1
overlap: 5 GiB
A freshly cloned image depends on its snapshot; you can see this in the parent and overlap fields above.
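Before deleting a snapshot, you can list every clone that still depends on it; a sketch:
rbd children rbd/rbd1@snap1 --user rbd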
If you want the clone to stand alone, independent of the snapshot, merge the snapshot's data into the clone (flatten it):
# Flatten the clone
[root@ceph-client ~]#rbd flatten rbd/rbd1-snap1-clone --user rbd
Image flatten: 100% complete...done.
# Verify the clone now stands alone: parent and overlap are gone
[root@ceph-client ~]#rbd info rbd1-snap1-clone --user rbd
rbd image 'rbd1-snap1-clone':
size 5 GiB in 1280 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 12613ea38941
block_name_prefix: rbd_data.12613ea38941
format: 2
features: layering
op_features:
flags:
create_timestamp: Sat Jul 11 10:07:20 2020
access_timestamp: Sat Jul 11 10:07:20 2020
modify_timestamp: Sat Jul 11 10:07:20 2020
Once the snapshot is no longer needed, it can be deleted directly:
Note: a protected snapshot must be unprotected before it can be deleted.
[root@ceph-client ~]#rbd info rbd/rbd1@snap1 --user rbd
rbd image 'rbd1':
size 5 GiB in 1280 objects
order 22 (4 MiB objects)
snapshot_count: 1
id: 112fe2290ad6
block_name_prefix: rbd_data.112fe2290ad6
format: 2
features: layering
op_features:
flags:
create_timestamp: Sat Jul 11 09:14:18 2020
access_timestamp: Sat Jul 11 13:44:17 2020
modify_timestamp: Sat Jul 11 09:14:18 2020
protected: True
# Unprotect the snapshot
[root@ceph-client ~]#rbd snap unprotect rbd/rbd1@snap1 --user rbd
[root@ceph-client ~]#rbd info rbd/rbd1@snap1 --user rbd
rbd image 'rbd1':
size 5 GiB in 1280 objects
order 22 (4 MiB objects)
snapshot_count: 1
id: 112fe2290ad6
block_name_prefix: rbd_data.112fe2290ad6
format: 2
features: layering
op_features:
flags:
create_timestamp: Sat Jul 11 09:14:18 2020
access_timestamp: Sat Jul 11 13:44:17 2020
modify_timestamp: Sat Jul 11 09:14:18 2020
protected: False
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 5 GiB 2
rbd1@snap1 5 GiB 2
rbd1-snap1-clone 5 GiB 2
# Delete the snapshot
[root@ceph-client ~]#rbd snap rm rbd/rbd1@snap1 --user rbd
Removing snap: 100% complete...done.
[root@ceph-client ~]#rbd ls -l --user rbd
NAME SIZE PARENT FMT PROT LOCK
rbd1 5 GiB 2
rbd1-snap1-clone 5 GiB 2
