Manually deploying Ceph Luminous on CentOS 7.6.1810


I. Prepare the environment (on all three nodes unless noted otherwise)

1. Hostnames (run the matching command on each node)

hostnamectl set-hostname ceph4
hostnamectl set-hostname ceph5
hostnamectl set-hostname ceph6

  

2. Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

  

3. Disable SELinux

sed -i 's/=enforcing/=disabled/' /etc/selinux/config
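
The sed edit above only takes effect after a reboot; to drop SELinux to permissive mode for the current session as well, you can additionally run:

setenforce 0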

  

4. Set the time zone

timedatectl set-timezone Asia/Shanghai

  

5. Host list

cat /etc/hosts
10.1.1.24   ceph4
10.1.1.25   ceph5
10.1.1.26   ceph6

  

6. Clock synchronization

yum -y install chrony

vi /etc/chrony.conf
server ntp1.aliyun.com iburst
server 0.centos.pool.ntp.org iburst
allow 10.1.1.0/24

systemctl restart chronyd

chronyc sources
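
The allow 10.1.1.0/24 line lets this node serve time to the cluster subnet. If you prefer to have ceph5 and ceph6 sync from ceph4 rather than from the public servers, a minimal sketch of their /etc/chrony.conf would be:

server 10.1.1.24 iburst

followed by the same systemctl restart chronyd and chronyc sources check.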

  

7. Configure passwordless SSH
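
A minimal sketch, assuming everything is run as root from ceph4 with the default key path; adjust users and key paths to your environment:

ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
ssh-copy-id root@ceph4
ssh-copy-id root@ceph5
ssh-copy-id root@ceph6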

 

8. OS version

cat /etc/redhat-release 
CentOS Linux release 7.6.1810 (Core)

  

9. Yum repositories

yum install wget -y
cd /etc/yum.repos.d
mkdir bak
mv ./* bak
wget http://mirrors.163.com/.help/CentOS7-Base-163.repo
yum install epel-release -y
yum install centos-release-ceph-luminous -y

  

10. Install the Ceph packages

yum -y install ceph ceph-radosgw

  

II. Deploy the mon

1. Deploy the first mon on ceph4

1) Log in to ceph4 and check whether the /etc/ceph directory has been created

ls /etc/ceph
rbdmap

  

2) Create the Ceph configuration file

touch /etc/ceph/ceph.conf

  

3) Generate an fsid for the Ceph cluster

uuidgen
1f0490cd-f938-4e20-8ea5-d817d941a6e6

  

4) Configure /etc/ceph/ceph.conf

[global]
fsid = 1f0490cd-f938-4e20-8ea5-d817d941a6e6
# ceph4 is the initial mon node
mon initial members = ceph4
# address of the mon node
mon host = 10.1.1.24
public network = 10.1.1.0/24
cluster network = 10.1.1.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
# default number of replicas
osd pool default size = 3
# minimum number of replicas
osd pool default min size = 1
osd pool default pg num = 64
osd pool default pgp num = 64
osd crush chooseleaf type = 1
osd_mkfs_type = xfs
max mds = 5
mds max file size = 100000000000000
mds cache size = 1000000
# after an OSD has been down for 900s, mark it out of the cluster and remap its data to other OSDs
mon osd down out interval = 900

[mon]
# allow up to 0.5s of clock drift (default 0.05s); the heterogeneous machines in this cluster regularly drift past 0.05s, so the limit is relaxed to simplify synchronization
mon clock drift allowed = .50

  

5) Create a keyring for the monitor

ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
creating /tmp/ceph.mon.keyring

cat /tmp/ceph.mon.keyring 
[mon.]
	key = AQCJ9MtcXNVUNxAAYBFhWeeTWC9gQKdrc9VCGQ==
	caps mon = "allow *"

  

6) Create an admin keyring for client.admin and grant it full access to the cluster

ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
creating /etc/ceph/ceph.client.admin.keyring

cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
	key = AQDd9MtcvduhDhAAa8JMDv/2KokI2qQoX9JZyw==
	auid = 0
	caps mds = "allow *"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"

  

7) Create a bootstrap-osd keyring and add a client.bootstrap-osd user to it

sudo ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
creating /var/lib/ceph/bootstrap-osd/ceph.keyring

cat /var/lib/ceph/bootstrap-osd/ceph.keyring
[client.bootstrap-osd]
	key = AQAo9ctcFpF5OxAAMU+pcJg4SqYWUPmDhiJGLA==
	caps mon = "profile bootstrap-osd"

  

8) Import the generated keys into ceph.mon.keyring

ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
importing contents of /etc/ceph/ceph.client.admin.keyring into /tmp/ceph.mon.keyring

[root@ceph4 ceph]# cat /tmp/ceph.mon.keyring 
[mon.]
	key = AQCJ9MtcXNVUNxAAYBFhWeeTWC9gQKdrc9VCGQ==
	caps mon = "allow *"
[client.admin]
	key = AQDd9MtcvduhDhAAa8JMDv/2KokI2qQoX9JZyw==
	auid = 0
	caps mds = "allow *"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"

ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
importing contents of /var/lib/ceph/bootstrap-osd/ceph.keyring into /tmp/ceph.mon.keyring

cat /tmp/ceph.mon.keyring 
[mon.]
	key = AQCJ9MtcXNVUNxAAYBFhWeeTWC9gQKdrc9VCGQ==
	caps mon = "allow *"
[client.admin]
	key = AQDd9MtcvduhDhAAa8JMDv/2KokI2qQoX9JZyw==
	auid = 0
	caps mds = "allow *"
	caps mgr = "allow *"
	caps mon = "allow *"
	caps osd = "allow *"
[client.bootstrap-osd]
	key = AQAo9ctcFpF5OxAAMU+pcJg4SqYWUPmDhiJGLA==
	caps mon = "profile bootstrap-osd"

  

9) Generate the initial monmap from the hostname, host IP, and fsid, and save it as /tmp/monmap

monmaptool --create --add ceph4 10.1.1.24 --fsid 1f0490cd-f938-4e20-8ea5-d817d941a6e6 /tmp/monmap
monmaptool: monmap file /tmp/monmap
monmaptool: set fsid to 1f0490cd-f938-4e20-8ea5-d817d941a6e6
monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)

  

10) Create the default mon data directory

sudo -u ceph mkdir /var/lib/ceph/mon/ceph-ceph4

  

11) Change the owner and group of ceph.mon.keyring to ceph

chown ceph.ceph /tmp/ceph.mon.keyring

  

12) Initialize the mon

ll /var/lib/ceph/mon/ceph-ceph4/
total 0

sudo -u ceph ceph-mon --mkfs -i ceph4 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring

ll /var/lib/ceph/mon/ceph-ceph4/
total 8
-rw------- 1 ceph ceph  77 May  3 16:10 keyring
-rw-r--r-- 1 ceph ceph   8 May  3 16:10 kv_backend
drwxr-xr-x 2 ceph ceph 112 May  3 16:10 store.db

  

13) Create an empty done file to mark the mon as fully provisioned

sudo touch /var/lib/ceph/mon/ceph-ceph4/done

  

14) Start the mon

systemctl start ceph-mon@ceph4

  

15) Check the service status

systemctl status ceph-mon@ceph4

  

16) Enable the service at boot

systemctl enable ceph-mon@ceph4

   

2. Add mon nodes ceph5 and ceph6

1) Copy the configuration and keyring files generated on ceph4 to ceph5 and ceph6

scp /etc/ceph/* root@ceph5:/etc/ceph/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@ceph5:/var/lib/ceph/bootstrap-osd/
scp /tmp/ceph.mon.keyring root@ceph5:/tmp/ceph.mon.keyring

scp /etc/ceph/* root@ceph6:/etc/ceph/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@ceph6:/var/lib/ceph/bootstrap-osd/
scp /tmp/ceph.mon.keyring root@ceph6:/tmp/ceph.mon.keyring

  

2) Create the default mon data directory on ceph5

sudo -u ceph mkdir /var/lib/ceph/mon/ceph-ceph5

  

3) On ceph5, change the owner and group of ceph.mon.keyring to ceph

chown ceph.ceph /tmp/ceph.mon.keyring

  

4) Fetch the mon key and the monmap

# fetching the mon key again is not strictly required, since the keyring was already copied over
ceph auth get mon. -o /tmp/ceph.mon.keyring
exported keyring for mon.
# the monmap does need to be fetched; it is used by the initialization below
ceph mon getmap -o /tmp/ceph.mon.map
got monmap epoch 1

  

5) Initialize the mon

sudo -u ceph ceph-mon --mkfs -i ceph5 --monmap /tmp/ceph.mon.map --keyring /tmp/ceph.mon.keyring

  

6) Create an empty done file to mark the mon as fully provisioned

sudo touch /var/lib/ceph/mon/ceph-ceph5/done

  

7) Add the new mon to the cluster's monitor map

ceph mon add ceph5 10.1.1.25:6789

  

8) Start the newly added mon

systemctl start ceph-mon@ceph5
systemctl status ceph-mon@ceph5

  

9) Enable the mon at boot

systemctl enable ceph-mon@ceph5

  

10) Repeat steps 2) through 9) on ceph6 (substituting ceph6 and 10.1.1.26), then check the cluster status with ceph -s

ceph -s
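
With all three monitors in quorum, it is also worth updating /etc/ceph/ceph.conf on every node so that clients and new daemons know about every monitor; a sketch of the relevant [global] lines, using the addresses from this post:

mon initial members = ceph4, ceph5, ceph6
mon host = 10.1.1.24, 10.1.1.25, 10.1.1.26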

  

Note: on Jewel, the cluster reports HEALTH_ERR at this point simply because no OSDs have been added yet.

 

III. Deploy the OSDs

1) Before adding OSDs, create three host buckets named ceph4, ceph5, and ceph6 in the CRUSH map

Before:
ceph osd tree
ID CLASS WEIGHT TYPE NAME    STATUS REWEIGHT PRI-AFF 
-1            0 root default  

[root@ceph4 ~]# ceph osd crush add-bucket ceph4 host
added bucket ceph4 type host to crush map
[root@ceph4 ~]# ceph osd crush add-bucket ceph5 host
added bucket ceph5 type host to crush map
[root@ceph4 ~]# ceph osd crush add-bucket ceph6 host
added bucket ceph6 type host to crush map

After:
ceph osd tree
ID CLASS WEIGHT TYPE NAME    STATUS REWEIGHT PRI-AFF 
-4            0 host ceph6                           
-3            0 host ceph5                           
-2            0 host ceph4                           
-1            0 root default 

  

2) Move the three buckets under the default root

ceph osd crush move ceph4 root=default
moved item id -2 name 'ceph4' to location {root=default} in crush map
ceph osd crush move ceph5 root=default
moved item id -3 name 'ceph5' to location {root=default} in crush map
ceph osd crush move ceph6 root=default
moved item id -4 name 'ceph6' to location {root=default} in crush map

After the move:
ceph osd tree
ID CLASS WEIGHT TYPE NAME      STATUS REWEIGHT PRI-AFF 
-1            0 root default                           
-2            0     host ceph4                         
-3            0     host ceph5                         
-4            0     host ceph6 

  

3) Prepare the disk; with ceph-disk on Luminous this single step already completes adding the OSD (repeat for each data disk on every node)

ceph-disk prepare  /dev/sdb
Setting name!
partNum is 0
REALLY setting name!
The operation has completed successfully.
Setting name!
partNum is 1
REALLY setting name!
The operation has completed successfully.
The operation has completed successfully.
meta-data=/dev/sdb1              isize=2048   agcount=4, agsize=6400 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=25600, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=864, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.

lsblk
sdb               8:16   0    5G  0 disk 
├─sdb1            8:17   0  100M  0 part /var/lib/ceph/osd/ceph-0
└─sdb2            8:18   0  4.9G  0 part 

  

Note: for a Luminous deployment, steps 4) through 14) below are no longer needed; ceph-disk has already created and mounted the OSD, and the partition must not be reformatted. They are retained only as a reference for the older manual procedure.
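
A quick sanity check for the Luminous path, assuming the new OSD came up as osd.0 (adjust the id to whatever ceph osd tree reports):

systemctl status ceph-osd@0
systemctl enable ceph-osd@0
ceph osd tree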

4) Create the first OSD

ceph osd create
0

  

5) Format the first partition

mkfs.xfs -f /dev/sdb1

  

6) Create the default OSD data directory

mkdir -p /var/lib/ceph/osd/ceph-0

  

7) Mount the partition

mount /dev/sdb1  /var/lib/ceph/osd/ceph-0

  

8) Add the mount to /etc/fstab

echo "/dev/sdb1 /var/lib/ceph/osd/ceph-0 xfs defaults 0 0" >> /etc/fstab

  

9) Initialize the OSD data directory

ceph-osd -i 0 --mkfs  --mkkey

  

10) Register the OSD key

ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
added key for osd.0

  

11) Add the new OSD to the CRUSH map

ceph osd crush add osd.0 1.0 host=ceph4

  

12) Change the owner and group of the OSD data directory to ceph

chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/

  

13) Start the new OSD

systemctl start ceph-osd@0
systemctl status ceph-osd@0

  

14) Enable the OSD at boot

systemctl enable ceph-osd@0

  

15) After repeating the disk preparation for the remaining disks on ceph4, ceph5, and ceph6, check ceph osd tree

ceph osd tree
ID CLASS WEIGHT  TYPE NAME      STATUS REWEIGHT PRI-AFF 
-1       0.04312 root default                           
-2       0.01437     host ceph4                         
 0   hdd 0.00479         osd.0      up  1.00000 1.00000 
 3   hdd 0.00479         osd.3      up  1.00000 1.00000 
 6   hdd 0.00479         osd.6      up  1.00000 1.00000 
-3       0.01437     host ceph5                         
 1   hdd 0.00479         osd.1      up  1.00000 1.00000 
 4   hdd 0.00479         osd.4      up  1.00000 1.00000 
 7   hdd 0.00479         osd.7      up  1.00000 1.00000 
-4       0.01437     host ceph6                         
 2   hdd 0.00479         osd.2      up  1.00000 1.00000 
 5   hdd 0.00479         osd.5      up  1.00000 1.00000 
 8   hdd 0.00479         osd.8      up  1.00000 1.00000 

  

16) Check the cluster status

ceph -s
  cluster:
    id:     1f0490cd-f938-4e20-8ea5-d817d941a6e6
    health: HEALTH_WARN
            no active mgr
 
  services:
    mon: 3 daemons, quorum ceph4,ceph5,ceph6
    mgr: no daemons active
    osd: 9 osds: 9 up, 9 in
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   0B used, 0B / 0B avail
    pgs:   

  

ceph df
GLOBAL:
    SIZE     AVAIL     RAW USED     %RAW USED 
      0B        0B           0B             0 
POOLS:
    NAME     ID     USED     %USED     MAX AVAIL     OBJECTS 

  

The new release creates no pools by default.
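
If you want something to test with right away, a small pool can be created by hand (the name and PG counts below are only an example; on Luminous a pool must also be tagged with an application before use):

ceph osd pool create testpool 64 64
ceph osd pool application enable testpool rbd
ceph df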

 

IV. Deploy the mgr

1) Deploy on ceph4

ceph --cluster ceph auth get-or-create mgr.ceph4 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
[mgr.ceph4]
	key = AQBHFcxciXa/LRAAyuJBnVr8M2ICIwxNPw5XcQ==

mkdir /var/lib/ceph/mgr/ceph-ceph4/

ceph --cluster ceph auth get-or-create mgr.ceph4 -o /var/lib/ceph/mgr/ceph-ceph4/keyring


systemctl start ceph-mgr@ceph4
systemctl enable ceph-mgr@ceph4

  

2) Deploy on ceph5

ceph --cluster ceph auth get-or-create mgr.ceph5 mon 'allow profile mgr' osd 'allow *' mds 'allow *'

mkdir /var/lib/ceph/mgr/ceph-ceph5/

ceph --cluster ceph auth get-or-create mgr.ceph5 -o /var/lib/ceph/mgr/ceph-ceph5/keyring


systemctl start ceph-mgr@ceph5
systemctl enable ceph-mgr@ceph5

  

3) Deploy on ceph6

ceph --cluster ceph auth get-or-create mgr.ceph6 mon 'allow profile mgr' osd 'allow *' mds 'allow *'

mkdir /var/lib/ceph/mgr/ceph-ceph6/

ceph --cluster ceph auth get-or-create mgr.ceph6 -o /var/lib/ceph/mgr/ceph-ceph6/keyring


systemctl start ceph-mgr@ceph6
systemctl enable ceph-mgr@ceph6

  

This completes the basic RADOS cluster deployment.
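
At this point ceph -s should show three mons in quorum, one active mgr with two standbys, and all OSDs up and in. Optionally, the Luminous dashboard module can be enabled on the mgr (it serves HTTP on port 7000 by default in this release):

ceph mgr module enable dashboard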

 

