Deploying a Ceph cluster with ceph-deploy, cluster expansion, and RBD storage


Architecture and topology

Host        IP         Roles             OS
ceph-admin  10.0.0.60  admin deploy mds  CentOS 7
ceph-node1  10.0.0.61  mon osd mds       CentOS 7
ceph-node2  10.0.0.62  mon osd mds       CentOS 7
ceph-node3  10.0.0.63  mon osd mds       CentOS 7

Preparation (run on every Ceph server)

#Basic OS tuning
#1. Switch to the Aliyun yum mirrors
rm -f /etc/yum.repos.d/*
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo


#2. Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0

#3. Time sync (cron + ntpdate for now; a proper ntpd setup follows in step 9)
echo '*/5 * * * * /usr/sbin/ntpdate -u ntp.api.bz' >>/var/spool/cron/root
systemctl restart crond.service
crontab -l

#4. Disable the swap partition
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a

#5. Configure the Ceph yum repository:
cat >/etc/yum.repos.d/ceph.repo<<eof
[ceph]
name=ceph
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
gpgcheck=0
enabled=1
[x86_64]
name=x86_64
baseurl=https://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
gpgcheck=0
enabled=1
eof

yum clean all
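As a quick sanity check (not part of the original steps), the yum cache can be rebuilt and the Ceph packages confirmed to resolve from the new mirrors:

# optional verification, assuming the Aliyun mirrors above are reachable:
yum makecache fast
yum list ceph-deploy ceph-common --showduplicates | tail -5    # should list 14.2.x (nautilus) builds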



#6. Add one extra 20G disk to each server, then rescan the SCSI bus so the new disk is detected
echo "- - -" > /sys/class/scsi_host/host0/scan
echo "- - -" > /sys/class/scsi_host/host1/scan
echo "- - -" > /sys/class/scsi_host/host2/scan



#7. Configure name resolution (/etc/hosts)
cat >/etc/hosts <<eof
10.0.0.60 admin  ceph-admin
10.0.0.61 ceph01 ceph-node1
10.0.0.62 ceph02 ceph-node2
10.0.0.63 ceph03 ceph-node3
eof


#8. Set each server's own hostname
hostnamectl set-hostname [hostname]
bash


#Configure passwordless SSH trust between the nodes:

ssh-keygen -f ~/.ssh/id_rsa -N ''

ssh-copy-id -i ~/.ssh/id_rsa.pub root@ceph-admin
ssh-copy-id -i ~/.ssh/id_rsa.pub root@ceph-node1
ssh-copy-id -i ~/.ssh/id_rsa.pub root@ceph-node2
ssh-copy-id -i ~/.ssh/id_rsa.pub root@ceph-node3


#Verify (run a command on another server remotely; no password prompt should appear):
[root@ceph-admin ~]# ssh 10.0.0.61 "ifconfig eth0"
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.61  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::20c:29ff:fe86:4512  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:86:45:12  txqueuelen 1000  (Ethernet)
        RX packets 70680  bytes 98327829 (93.7 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 11931  bytes 1237409 (1.1 MiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
     
     
Copy the Ceph repo files to the other servers
cd /etc/yum.repos.d
scp * root@ceph-node1:/etc/yum.repos.d/
scp * root@ceph-node2:/etc/yum.repos.d/
scp * root@ceph-node3:/etc/yum.repos.d/

        
#9. Time synchronization (ntpd):

ceph-admin acts as the local time server; the other servers sync from it.
9.1 Configuration on ceph-admin:
yum install -y ntp
systemctl start ntpd
systemctl enable ntpd
timedatectl set-timezone Asia/Shanghai    #set the timezone to Asia/Shanghai

9.2 Sync ceph-admin against the Aliyun NTP servers:
Time-server configuration (ceph-admin acts as the time server). In /etc/ntp.conf, find:
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst

and replace them with:
server ntp1.aliyun.com iburst

Restart ntpd:
systemctl restart ntpd

Check synchronization: ntpq -pn
[root@ceph-admin ~]# ntpq -pn
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*120.25.115.20   10.137.53.7      2 u    1   64    1   36.838    5.072   0.622


Configuration on the other servers:
yum install -y ntp
systemctl start ntpd
systemctl enable ntpd
timedatectl set-timezone Asia/Shanghai    #set the timezone to Asia/Shanghai

#Point them at ceph-admin with sed (comment out the default servers, then add ours):
 sed  -i '20,25s/^server.*/# &/' /etc/ntp.conf
 sed  -i "25iserver 10.0.0.60 iburst" /etc/ntp.conf

#Restart the service:
systemctl restart ntpd
systemctl enable ntpd
ntpq -pn

#Check that this node is now syncing against the time server:
[root@ceph-node1 ~]# ntpq -pn
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*10.0.0.60       120.25.115.20    3 u    6   64  377    0.334    0.582   0.501
#A leading * means the peer has been selected and time is synchronized

Install ceph-deploy (on the ceph-admin node)

[root@ceph-admin ~]# yum install -y python-setuptools ceph-deploy
[root@ceph-admin ~]# ceph-deploy --version
2.0.1         #<------- version 2.0+ is recommended; it only needs to be installed on the deploy node


#Create a directory to hold the cluster bootstrap files
[root@ceph-admin ~]# mkdir /my-cluster
[root@ceph-admin ~]# cd  /my-cluster

#Create the initial monitor configuration:
--public-network 10.0.0.0/24     Ceph's client-facing (public) network
--cluster-network 10.0.0.0/24    Ceph's internal replication (cluster) network

[root@ceph-admin my-cluster]# ceph-deploy new  --public-network 10.0.0.0/24 --cluster-network 10.0.0.0/24 ceph-admin
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy new --public-network 10.0.0.0/24 --cluster-network 10.0.0.0/24 ceph-admin
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  func                          : <function new at 0x14a3140>
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x15089e0>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  ssh_copykey                   : True
[ceph_deploy.cli][INFO  ]  mon                           : ['ceph-admin']
[ceph_deploy.cli][INFO  ]  public_network                : 10.0.0.0/24
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  cluster_network               : 10.0.0.0/24
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  fsid                          : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO  ] making sure passwordless SSH succeeds
[ceph-admin][DEBUG ] connected to host: ceph-admin 
[ceph-admin][DEBUG ] detect platform information from remote host
[ceph-admin][DEBUG ] detect machine type
[ceph-admin][DEBUG ] find the location of an executable
[ceph-admin][INFO  ] Running command: /usr/sbin/ip link show
[ceph-admin][INFO  ] Running command: /usr/sbin/ip addr show
[ceph-admin][DEBUG ] IP addresses found: [u'10.0.0.60']
[ceph_deploy.new][DEBUG ] Resolving host ceph-admin
[ceph_deploy.new][DEBUG ] Monitor ceph-admin at 10.0.0.60
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph-admin']
[ceph_deploy.new][DEBUG ] Monitor addrs are [u'10.0.0.60']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...


Three files are generated by this step:
[root@ceph-admin my-cluster]# ll
total 12
-rw-r--r-- 1 root root  256 Oct 10 00:21 ceph.conf             # config file
-rw-r--r-- 1 root root 3034 Oct 10 00:21 ceph-deploy-ceph.log  # deployment log
-rw------- 1 root root   73 Oct 10 00:21 ceph.mon.keyring      # monitor keyring, used for authentication

[root@ceph-admin my-cluster]# cat ceph.conf 
[global]
fsid = ce3bead3-55ca-4b88-9dff-0c7dd4db1880
public_network = 10.0.0.0/24         #public network
cluster_network = 10.0.0.0/24        #cluster network
mon_initial_members = ceph-admin
mon_host = 10.0.0.60
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
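Optionally (this is an assumption, not something the original walkthrough does), extra defaults can be appended to ceph.conf at this point so that mon create-initial and the later config push distribute them to every node. A minimal sketch:

cat >>/my-cluster/ceph.conf <<eof
osd_pool_default_size = 3        # default replica count for new pools
osd_pool_default_min_size = 2    # minimum replicas that must be up for I/O
mon_allow_pool_delete = true     # allow pool deletion (convenient in a lab, risky in production)
eof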


#Install the Ceph packages by hand so ceph-deploy does not switch the repos back to the upstream mirrors and fail:
# Required packages (install on every node):
 yum install -y ceph ceph-mon ceph-mgr ceph-radosgw ceph-mds
 
 
 #Initialize the mon node:
 [root@ceph-admin my-cluster]# ceph-deploy mon create-initial
 After initialization the following keyrings have been generated:
 [root@ceph-admin my-cluster]# ll
total 44
-rw------- 1 root root   113 Oct 10 00:33 ceph.bootstrap-mds.keyring
-rw------- 1 root root   113 Oct 10 00:33 ceph.bootstrap-mgr.keyring
-rw------- 1 root root   113 Oct 10 00:33 ceph.bootstrap-osd.keyring
-rw------- 1 root root   113 Oct 10 00:33 ceph.bootstrap-rgw.keyring
-rw------- 1 root root   151 Oct 10 00:33 ceph.client.admin.keyring
-rw-r--r-- 1 root root   256 Oct 10 00:21 ceph.conf
-rw-r--r-- 1 root root 16135 Oct 10 00:33 ceph-deploy-ceph.log
-rw------- 1 root root    73 Oct 10 00:21 ceph.mon.keyring

The config file and admin keyring still need to be pushed to every node:
[root@ceph-admin my-cluster]# ceph-deploy admin ceph-admin ceph-node1 ceph-node2 ceph-node3
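A quick way to confirm the push landed (a simple check, not in the original) is to list the files on each node over SSH:

for h in ceph-admin ceph-node1 ceph-node2 ceph-node3; do
    ssh $h "ls -l /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring"    # both files should exist on every node
done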


#The basic Ceph setup is now in place; check the status with ceph -s:
[root@ceph-admin my-cluster]# ceph -s
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_OK                  #  cluster is healthy
 
  services:
    mon: 1 daemons, quorum ceph-admin (age 4m)    #one mon daemon
    mgr: no daemons active             #no mgr deployed yet
    osd: 0 osds: 0 up, 0 in            #no OSDs added yet
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     


#Deploy the manager (mgr) daemon (it could run on ceph-admin or any other node; here it goes to ceph-node1):
[root@ceph-admin my-cluster]# ceph-deploy mgr create ceph-node1
...
...
...
[ceph-node1][DEBUG ] create path recursively if it doesn't exist
[ceph-node1][INFO  ] Running command: ceph --cluster ceph --name client.bootstrap-mgr --keyring /var/lib/ceph/bootstrap-mgr/ceph.keyring auth get-or-create mgr.ceph-node1 mon allow profile mgr osd allow * mds allow * -o /var/lib/ceph/mgr/ceph-ceph-node1/keyring
[ceph-node1][INFO  ] Running command: systemctl enable ceph-mgr@ceph-node1
[ceph-node1][WARNIN] Created symlink from /etc/systemd/system/ceph-mgr.target.wants/ceph-mgr@ceph-node1.service to /usr/lib/systemd/system/ceph-mgr@.service.
[ceph-node1][INFO  ] Running command: systemctl start ceph-mgr@ceph-node1  #execution log
[ceph-node1][INFO  ] Running command: systemctl enable ceph.target

#Check again after deploying the mgr:
[root@ceph-admin my-cluster]# ceph -s
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3
 
  services:
    mon: 1 daemons, quorum ceph-admin (age 10m)
    mgr: ceph-node1(active, since 79s)   #<----- the mgr is now running on ceph-node1
    osd: 0 osds: 0 up, 0 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     



#Add the disks:
 Each VM gets one extra 20G disk; detect it with the SCSI rescan commands:
echo "- - -" > /sys/class/scsi_host/host0/scan
echo "- - -" > /sys/class/scsi_host/host1/scan
echo "- - -" > /sys/class/scsi_host/host2/scan
Example run:
[root@ceph-admin my-cluster]# echo "- - -" > /sys/class/scsi_host/host0/scan
[root@ceph-admin my-cluster]# echo "- - -" > /sys/class/scsi_host/host1/scan
[root@ceph-admin my-cluster]# echo "- - -" > /sys/class/scsi_host/host2/scan
[root@ceph-admin my-cluster]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   20G  0 disk 
├─sda1            8:1    0  200M  0 part /boot
└─sda2            8:2    0 19.8G  0 part 
  └─centos-root 253:0    0 19.8G  0 lvm  /
sdb               8:16   0   20G  0 disk      #newly added disk
sr0              11:0    1  4.2G  0 rom  


ceph-deploy osd create ceph-admin --data /dev/sdb
ceph-deploy osd create ceph-node1 --data /dev/sdb
ceph-deploy osd create ceph-node2 --data /dev/sdb
ceph-deploy osd create ceph-node3 --data /dev/sdb
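The four commands above can also be written as a loop; a small sketch assuming every host exposes the new disk as /dev/sdb:

for h in ceph-admin ceph-node1 ceph-node2 ceph-node3; do
    ceph-deploy osd create $h --data /dev/sdb    # one bluestore OSD per host
done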

Check the status after the 4 disks have been added as OSDs:
[root@ceph-admin my-cluster]# ceph -s
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_OK
 
  services:
    mon: 1 daemons, quorum ceph-admin (age 19m)
    mgr: ceph-node1(active, since 10m)
    osd: 4 osds: 4 up (since 4s), 4 in (since 4s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   4.0 GiB used, 76 GiB / 80 GiB avail    #4 × 20G disks make up the 80G raw capacity
    pgs:     

[root@ceph-admin my-cluster]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME           STATUS REWEIGHT PRI-AFF 
-1       0.07794 root default                                
-3       0.01949     host ceph-admin                         
 0   hdd 0.01949         osd.0           up  1.00000 1.00000 
-5       0.01949     host ceph-node1                         
 1   hdd 0.01949         osd.1           up  1.00000 1.00000 
-7       0.01949     host ceph-node2                         
 2   hdd 0.01949         osd.2           up  1.00000 1.00000 
-9       0.01949     host ceph-node3                         
 3   hdd 0.01949         osd.3           up  1.00000 1.00000 
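Per-OSD and per-pool usage can also be checked with the standard reporting commands:

ceph osd df    # utilization, weight and PG count per OSD
ceph df        # cluster-wide and per-pool capacity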
 
 
 
 #At this point a basic Ceph cluster is up and running: 1 admin/mon node, 1 mgr node and 4 OSDs
 
 
 
 #Expand the mon nodes
 Note: mons should be kept at an odd count (1, 3, 5, ...), so going from 1 mon you would add two more, e.g. ceph-node1 and ceph-node2. (Adding ceph-node3 as well, as done below, actually ends up with an even count of 4.)
 [root@ceph-admin my-cluster]# ceph-deploy mon add ceph-node1 --address 10.0.0.61
 [root@ceph-admin my-cluster]# ceph-deploy mon add ceph-node2 --address 10.0.0.62
 [root@ceph-admin my-cluster]# ceph-deploy mon add ceph-node3 --address 10.0.0.63
 Check:
 [root@ceph-admin my-cluster]# ceph -s 
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph-admin,ceph-node1,ceph-node2 (age 2s)    #after adding 2 more mons the mon count reaches 3
    mgr: ceph-node1(active, since 21m)      #still only one mgr; mgr expansion follows below
    osd: 4 osds: 4 up (since 10m), 4 in (since 10m)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   4.0 GiB used, 76 GiB / 80 GiB avail
    pgs:     
 Check:
[root@ceph-admin my-cluster]# ceph mon stat
e4: 4 mons at {ceph-admin=[v2:10.0.0.60:3300/0,v1:10.0.0.60:6789/0],ceph-node1=[v2:10.0.0.61:3300/0,v1:10.0.0.61:6789/0],ceph-node2=[v2:10.0.0.62:3300/0,v1:10.0.0.62:6789/0],ceph-node3=[v2:10.0.0.63:3300/0,v1:10.0.0.63:6789/0]}, election epoch 16, leader 0 ceph-admin, quorum 0,1,2,3 ceph-admin,ceph-node1,ceph-node2,ceph-node3

 Check:
[root@ceph-admin my-cluster]# ceph mon dump
dumped monmap epoch 4
epoch 4
fsid ce3bead3-55ca-4b88-9dff-0c7dd4db1880
last_changed 2020-10-10 01:10:00.536702
created 2020-10-10 00:33:06.013571
min_mon_release 14 (nautilus)
0: [v2:10.0.0.60:3300/0,v1:10.0.0.60:6789/0] mon.ceph-admin
1: [v2:10.0.0.61:3300/0,v1:10.0.0.61:6789/0] mon.ceph-node1
2: [v2:10.0.0.62:3300/0,v1:10.0.0.62:6789/0] mon.ceph-node2
3: [v2:10.0.0.63:3300/0,v1:10.0.0.63:6789/0] mon.ceph-node3



Mon expansion triggers a quorum election, which can be inspected with:
ceph quorum_status --format json-pretty
ceph quorum_status --format json-pretty|grep quorum_leader_name



 #Expand the mgr nodes:
[root@ceph-admin my-cluster]# ceph-deploy mgr create ceph-node1 ceph-node2 ceph-node3
[root@ceph-admin my-cluster]# ceph -s
  cluster:
    id:     ce3bead3-55ca-4b88-9dff-0c7dd4db1880
    health: HEALTH_OK
 
  services:
    mon: 4 daemons, quorum ceph-admin,ceph-node1,ceph-node2,ceph-node3 (age 8m)
    mgr: ceph-node1(active, since 36m), standbys: ceph-node2, ceph-node3   # active/standby mgrs: ceph-node1 is active; if it fails, a standby takes over
    osd: 4 osds: 4 up (since 25m), 4 in (since 25m)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   4.0 GiB used, 76 GiB / 80 GiB avail
    pgs:     

At this point a highly available cluster has been deployed:
4 mon nodes and 3 mgr nodes.
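To see the mgr failover described above in action, a simple test could look like the following sketch (run the systemctl commands on ceph-node1):

# on ceph-node1: stop the active mgr
systemctl stop ceph-mgr@ceph-node1
# on ceph-admin: one of the standbys should now be shown as active
ceph -s | grep mgr
# on ceph-node1: bring the mgr back; it rejoins as a standby
systemctl start ceph-mgr@ceph-node1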

Ceph's three storage interfaces

Block storage (RBD) [the most commonly used]:

#Create and use block storage


#Create a pool

Before creating a pool, the default pg_num is usually overridden. The official rule of thumb:
fewer than 5 OSDs: set pg_num to 128
5 to 10 OSDs: set pg_num to 512
10 to 50 OSDs: set pg_num to 4096

[To demonstrate growing and shrinking these values, pg_num is set to 64 here and raised to 128 later; see the quick calculation below]
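A common rule of thumb behind those numbers (an approximation, not quoted from the official guidance above): total PGs for a pool ≈ (number of OSDs × 100) / replica count, rounded to the nearest power of two. For this 4-OSD cluster with 3 replicas:

# (4 OSDs * 100) / 3 replicas ≈ 133  ->  nearest power of two is 128
echo $(( 4 * 100 / 3 ))    # prints 133; 64 is used first only to demonstrate growing to 128 later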
[root@ceph-admin my-cluster]# ceph osd pool create ceph-pool 64 64
pool 'ceph-pool' created


#Inspect the pool:
[root@ceph-admin my-cluster]# ceph osd lspools                     #list pools
1 ceph-pool
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool pg_num   #PG count
pg_num: 64
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool pgp_num  #PGP count
pgp_num: 64
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool size     #replica count
size: 3
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool crush_rule   #crush rule
crush_rule: replicated_rule


Growing and shrinking:
[root@ceph-admin my-cluster]# ceph osd pool set ceph-pool size 2   #change the replica count to 2
set pool 1 size to 2  
[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool size     #verify the replica count
size: 2

[root@ceph-admin my-cluster]# ceph osd pool get ceph-pool pg_num    #current pg_num
pg_num: 64 
[root@ceph-admin my-cluster]# ceph osd pool set ceph-pool pg_num 128    #raise pg_num; pgp_num must be raised to the same value to keep them consistent
set pool 1 pg_num to 128
[root@ceph-admin my-cluster]# ceph osd pool set ceph-pool pgp_num 128   #raise pgp_num to match
set pool 1 pgp_num to 128
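Raising pg_num/pgp_num causes the cluster to re-peer and move data; progress can be watched with the usual status commands:

ceph -s                                   # look for peering/backfilling PG states until HEALTH_OK returns
ceph osd pool get ceph-pool pg_num        # confirm the new value took effect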



#Create RBD images in the pool:
Method 1:
[root@ceph-admin my-cluster]# rbd create -p ceph-pool --image rbd_test.img --size 10G    #a 10G image

Method 2:
rbd create ceph-pool/rbd_test1.img --size 10G

List the images:
[root@ceph-admin my-cluster]# rbd -p ceph-pool ls
rbd_test.img
rbd_test1.img

[root@ceph-admin my-cluster]# rbd info ceph-pool/rbd_test.img    #show this image's details
rbd image 'rbd_test.img':
	size 10 GiB in 2560 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 12c09e1a0dcd
	block_name_prefix: rbd_data.12c09e1a0dcd
	format: 2
	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
	op_features: 
	flags: 
	create_timestamp: Sat Oct 10 01:41:24 2020
	access_timestamp: Sat Oct 10 01:41:24 2020
	modify_timestamp: Sat Oct 10 01:41:24 2020
	
[root@ceph-admin my-cluster]# rbd info ceph-pool/rbd_test1.img   #show this image's details
rbd image 'rbd_test1.img': 
	size 10 GiB in 2560 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 12d0a7da2dfa
	block_name_prefix: rbd_data.12d0a7da2dfa
	format: 2
	features: layering, exclusive-lock, object-map, fast-diff, deep-flatten  #these extra features can be disabled
	op_features: 
	flags: 
	create_timestamp: Sat Oct 10 01:43:40 2020
	access_timestamp: Sat Oct 10 01:43:40 2020
	modify_timestamp: Sat Oct 10 01:43:40 2020


#Disable the extra features:
rbd feature disable ceph-pool/rbd_test1.img  deep-flatten
rbd feature disable ceph-pool/rbd_test1.img  fast-diff
rbd feature disable ceph-pool/rbd_test1.img exclusive-lock
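The three commands above can also be collapsed into a single call, and (as an optional tweak, not something the original does) newly created images can default to layering only via rbd_default_features; exact feature names and ordering may vary slightly between Ceph releases:

# disable several features in one go:
rbd feature disable ceph-pool/rbd_test1.img exclusive-lock object-map fast-diff deep-flatten
# make layering (feature bit 1) the default for future images:
echo "rbd_default_features = 1" >> /etc/ceph/ceph.conf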

[root@ceph-admin my-cluster]# rbd info ceph-pool/rbd_test1.img
rbd image 'rbd_test1.img':
	size 10 GiB in 2560 objects
	order 22 (4 MiB objects)
	snapshot_count: 0
	id: 119d1e259330
	block_name_prefix: rbd_data.119d1e259330
	format: 2
	features: layering   #only layering remains now
	op_features: 
	flags: 
	create_timestamp: Sun Oct 11 00:27:40 2020
	access_timestamp: Sun Oct 11 00:27:40 2020
	modify_timestamp: Sun Oct 11 00:27:40 2020


Important:
#### Note: the kernel RBD client on CentOS 7 only supports the layering feature, so the image can only be mapped once the other features have been disabled.
#Map the image as a block device:
[root@ceph-admin my-cluster]# rbd map ceph-pool/rbd_test1.img
/dev/rbd0
[root@ceph-admin my-cluster]# rbd device list
id pool       namespace image           snap   device    
0  ceph-pool            rbd_test1.img   -      /dev/rbd0 

rbd_test1.img is now mapped as a block device on this host: /dev/rbd0




#There are two images; to delete one of them:
[root@ceph-admin my-cluster]# rbd rm -p ceph-pool --image rbd_test1.img
Removing image: 100% complete...done.

Check:
[root@ceph-admin my-cluster]# rbd -p ceph-pool ls 
rbd_test.img


# If the image was mapped on another server, unmap it there first:
rbd unmap /dev/rbd0


# Mount the block device:
# After the mapping step, the server has a new block device,
# which can be formatted and used like an ordinary hard disk:
[root@ceph-admin my-cluster]# fdisk -l

Disk /dev/sda: 21.5 GB, 21474836480 bytes, 41943040 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk label type: dos
Disk identifier: 0x000aef55

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *        2048      411647      204800   83  Linux
/dev/sda2          411648    41943039    20765696   8e  Linux LVM

Disk /dev/mapper/centos-root: 21.3 GB, 21260926976 bytes, 41525248 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes


Disk /dev/rbd0: 10.7 GB, 10737418240 bytes, 20971520 sectors    ### the RBD block device; also visible with lsblk
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes


Once the block device exists it has to be mounted. Two cases follow:

Local mount:
1. Format it:
[root@ceph-admin my-cluster]# mkfs.ext4 /dev/rbd0      #create an ext4 filesystem
mke2fs 1.42.9 (28-Dec-2013)
Discarding device blocks: done                            
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=1024 blocks, Stripe width=1024 blocks
655360 inodes, 2621440 blocks
131072 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2151677952
80 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks: 
	32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632

Allocating group tables: done                            
Writing inode tables: done                            
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done 

#Production note: if the device already contains data, do not re-run mkfs; use resize2fs /dev/rbd0 to grow the existing ext4 filesystem in place without destroying the data.


2. Mount it:
[root@ceph-admin my-cluster]# mkdir /rbd-demo                  #create a mount point
[root@ceph-admin my-cluster]# mount /dev/rbd0 /rbd-demo        #mount the device there
[root@ceph-admin my-cluster]# df -h                            #verify
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   20G  1.8G   19G  10% /
devtmpfs                 2.0G     0  2.0G   0% /dev
tmpfs                    2.0G     0  2.0G   0% /dev/shm
tmpfs                    2.0G  8.7M  2.0G   1% /run
tmpfs                    2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/sda1                197M  103M   95M  53% /boot
tmpfs                    394M     0  394M   0% /run/user/0
/dev/rbd0                9.8G   37M  9.2G   1% /rbd-demo

3. Make the mount persistent across reboots:
3.1 Find the UUID
[root@ceph-admin my-cluster]# blkid 
/dev/sr0: UUID="2017-09-06-10-51-00-00" LABEL="CentOS 7 x86_64" TYPE="iso9660" PTTYPE="dos"
/dev/sda1: UUID="cb322c86-f37a-49e8-86a0-924d9b0c719e" TYPE="xfs" 
/dev/sda2: UUID="4YLVja-jT6q-FphK-3E60-5Qde-Z3gw-zLpcYT" TYPE="LVM2_member" 
/dev/mapper/centos-root: UUID="e3508edd-30c0-4e7d-9d8f-622c8dbd174e" TYPE="xfs" 
/dev/rbd0: UUID="f7abce4f-6a51-4c65-bf89-99eec90869ec" TYPE="ext4" 

3.2 Add it to fstab (the filesystem created above is ext4):
echo 'UUID=f7abce4f-6a51-4c65-bf89-99eec90869ec  /rbd-demo  ext4  defaults  0 0' >>/etc/fstab




Remote mount:
e.g. a separate host, ceph-client, needs to mount an RBD image:

On the server (ceph-admin):
 rbd create ceph-pool/rbd_test2.img --size 5G
 rbd pool init ceph-pool
 ceph-deploy admin [ceph-client name]        #[ceph-client name] is the client's hostname; the name and IP must be in the admin node's hosts file or this step fails
                                             #afterwards a ceph.client.admin.keyring file is present on the client, used as the credential for mapping RBD images
 
On the client:
1. Install the Ceph client tools
yum install -y ceph-common

2. Obtain the Ceph keyring
 Copy the content of the server's ceph.client.admin.keyring into the same file on the client (create the file if it does not already exist).

3. Map the block device
rbd map ceph-pool/rbd_test1.img
To undo the mapping: rbd unmap /dev/rbd0  (rbd0 refers to whatever device the image was mapped to; the name may differ)

4. Mount it into a directory:
   mkdir /root/rdb0 -p
   resize2fs  /dev/rbd0 
   mount  /dev/rbd0 /root/rdb0




#Grow the image:
[root@ceph-client ~]# rbd resize ceph-pool/rbd_test1.img --size 15G
Resizing image: 100% complete...done.

Check whether the size has changed:
[root@ceph-client ~]# df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/rbd0               ext4      9.8G   37M  9.2G   1% /root/rdb0    #still shows 10G; the filesystem has not grown yet

Grow the filesystem:
[root@ceph-client ~]# resize2fs  /dev/rbd0     #online resize, no data loss
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/rbd0 is mounted on /root/rdb0; on-line resizing required
old_desc_blocks = 2, new_desc_blocks = 2
The filesystem on /dev/rbd0 is now 3932160 blocks long.

Check again:
[root@ceph-client ~]# df -Th
Filesystem              Type      Size  Used Avail Use% Mounted on
/dev/rbd0               ext4       15G   41M   14G   1% /root/rdb0    # the extra space is now available
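Shrinking works with the same command plus --allow-shrink, but it discards any data beyond the new size, so the filesystem must be shrunk first (ext4 can only be shrunk offline). A cautious sketch, not something to run on data you care about:

umount /root/rdb0
e2fsck -f /dev/rbd0                                            # required before shrinking ext4
resize2fs /dev/rbd0 8G                                         # shrink the filesystem below the target image size
rbd resize ceph-pool/rbd_test1.img --size 10G --allow-shrink   # then shrink the image itself
mount /dev/rbd0 /root/rdb0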

