I. Environment preparation
1. Hostnames
hostnamectl set-hostname ceph4   # on the first node
hostnamectl set-hostname ceph5   # on the second node
hostnamectl set-hostname ceph6   # on the third node
2. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
3. Disable SELinux
sed -i 's/=enforcing/=disabled/' /etc/selinux/config
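The sed edit only takes effect after a reboot; to also drop SELinux out of enforcing mode in the running session:
setenforce 0    # runtime change to permissive; the config edit covers subsequent boots
getenforce      # should now print Permissive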
4. Set the time zone
timedatectl set-timezone Asia/Shanghai
5. Host list
cat /etc/hosts
10.1.1.24 ceph4
10.1.1.25 ceph5
10.1.1.26 ceph6
6. Time synchronization
yum -y install chrony

vi /etc/chrony.conf       # set the upstream servers and allow the cluster subnet
server ntp1.aliyun.com iburst
server 0.centos.pool.ntp.org iburst
allow 10.1.1.0/24

systemctl restart chronyd
chronyc sources
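To verify that each node is actually synchronized, chrony's standard status commands can be used:
chronyc tracking      # offset and jitter against the selected source
chronyc sources -v    # '*' marks the source currently synced to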
7. Passwordless SSH between the nodes
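Key-based SSH is what lets the later scp and remote commands run without prompts. A minimal sketch from ceph4, assuming root login over SSH is permitted on all three hosts:
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa   # generate a key pair without a passphrase
for h in ceph4 ceph5 ceph6; do
    ssh-copy-id root@$h                    # push the public key to each node
done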
8. OS version
cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
9. Yum repositories
yum install wget -y
cd /etc/yum.repos.d
mkdir bak
mv *.repo bak/
wget http://mirrors.163.com/.help/CentOS7-Base-163.repo
yum install epel-release -y
yum install centos-release-ceph-luminous -y
10. Install the Ceph packages
yum -y install ceph ceph-radosgw
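A quick way to confirm that the Luminous packages landed on every node:
ceph --version              # should report a 12.2.x (Luminous) build
rpm -qa | grep '^ceph'      # list the installed ceph packages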
II. Deploy mon
1. Deploy the first mon node (ceph4)
1) Log in to ceph4 and check that the /etc/ceph directory was created by the package install
ls /etc/ceph
rbdmap
2) Create the ceph configuration file
touch /etc/ceph/ceph.conf
3) Generate an ID (fsid) for the cluster
uuidgen
1f0490cd-f938-4e20-8ea5-d817d941a6e6
4) Configure /etc/ceph/ceph.conf
[global]
fsid = 1f0490cd-f938-4e20-8ea5-d817d941a6e6
# ceph4 is the initial mon node
mon initial members = ceph4
# address of the mon node
mon host = 10.1.1.24
public network = 10.1.1.0/24
cluster network = 10.1.1.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
# default number of replicas
osd pool default size = 3
# minimum number of replicas required to keep serving I/O
osd pool default min size = 1
osd pool default pg num = 64
osd pool default pgp num = 64
osd crush chooseleaf type = 1
osd_mkfs_type = xfs
max mds = 5
mds max file size = 100000000000000
mds cache size = 1000000
# after an OSD has been down for 900 s, mark it out of the cluster and remap its data to other nodes
mon osd down out interval = 900

[mon]
# raise the allowed clock drift from the default 0.05 s to 0.5 s; the heterogeneous PCs in this
# cluster drift by more than 0.05 s, and this keeps the mons from flagging clock skew
mon clock drift allowed = .50
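For reference, a common rule of thumb for PG counts is total PGs ≈ (OSD count × 100) / replica size, rounded to a power of two. With the nine OSDs deployed in section III and size = 3, that suggests roughly 256 PGs cluster-wide, so 64 per pool is a conservative starting value:
echo $(( 9 * 100 / 3 ))   # 300; the nearest power of two is 256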
5) Create the monitor keyring
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
creating /tmp/ceph.mon.keyring

cat /tmp/ceph.mon.keyring
[mon.]
        key = AQCJ9MtcXNVUNxAAYBFhWeeTWC9gQKdrc9VCGQ==
        caps mon = "allow *"
6) Create the client.admin keyring and grant the admin user full access to the cluster
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
creating /etc/ceph/ceph.client.admin.keyring

cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
        key = AQDd9MtcvduhDhAAa8JMDv/2KokI2qQoX9JZyw==
        auid = 0
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
7) Generate a bootstrap-osd keyring and add a client.bootstrap-osd user to it
sudo ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
creating /var/lib/ceph/bootstrap-osd/ceph.keyring

cat /var/lib/ceph/bootstrap-osd/ceph.keyring
[client.bootstrap-osd]
        key = AQAo9ctcFpF5OxAAMU+pcJg4SqYWUPmDhiJGLA==
        caps mon = "profile bootstrap-osd"
8) Import the generated keys into ceph.mon.keyring
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
importing contents of /etc/ceph/ceph.client.admin.keyring into /tmp/ceph.mon.keyring

ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
importing contents of /var/lib/ceph/bootstrap-osd/ceph.keyring into /tmp/ceph.mon.keyring

cat /tmp/ceph.mon.keyring
[mon.]
        key = AQCJ9MtcXNVUNxAAYBFhWeeTWC9gQKdrc9VCGQ==
        caps mon = "allow *"
[client.admin]
        key = AQDd9MtcvduhDhAAa8JMDv/2KokI2qQoX9JZyw==
        auid = 0
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
[client.bootstrap-osd]
        key = AQAo9ctcFpF5OxAAMU+pcJg4SqYWUPmDhiJGLA==
        caps mon = "profile bootstrap-osd"
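A quick sanity check that all three entities made it into the keyring:
ceph-authtool -l /tmp/ceph.mon.keyring   # should list mon., client.admin and client.bootstrap-osd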
9) Generate the monmap from the hostname, host IP and fsid, and save it as /tmp/monmap
monmaptool --create --add ceph4 10.1.1.24 --fsid 1f0490cd-f938-4e20-8ea5-d817d941a6e6 /tmp/monmap
monmaptool: monmap file /tmp/monmap
monmaptool: set fsid to 1f0490cd-f938-4e20-8ea5-d817d941a6e6
monmaptool: writing epoch 0 to /tmp/monmap (1 monitors)
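The resulting monmap can be inspected before it is used:
monmaptool --print /tmp/monmap   # shows epoch 0, the fsid, and mon.ceph4 at 10.1.1.24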
10) Create the default data directory
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-ceph4
11) Change the owner and group of ceph.mon.keyring to ceph
chown ceph:ceph /tmp/ceph.mon.keyring
12) Initialize the mon
ll /var/lib/ceph/mon/ceph-ceph4/
total 0

sudo -u ceph ceph-mon --mkfs -i ceph4 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring

ll /var/lib/ceph/mon/ceph-ceph4/
total 8
-rw------- 1 ceph ceph  77 May  3 16:10 keyring
-rw-r--r-- 1 ceph ceph   8 May  3 16:10 kv_backend
drwxr-xr-x 2 ceph ceph 112 May  3 16:10 store.db
13) Create an empty done file to mark the mon as deployed and prevent re-initialization
sudo touch /var/lib/ceph/mon/ceph-ceph4/done
14) Start the mon
systemctl start ceph-mon@ceph4
15) Check its status
systemctl status ceph-mon@ceph4
16) Enable it at boot
systemctl enable ceph-mon@ceph4
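With the first mon running, the cluster already answers commands; a quick check, for instance:
ceph mon stat   # should report one mon, with ceph4 in quorum
ceph -s         # overall cluster status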
2. Add mon nodes ceph5 and ceph6
1) Copy the configuration and keyring files generated on ceph4 to ceph5 and ceph6
scp /etc/ceph/* root@ceph5:/etc/ceph/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@ceph5:/var/lib/ceph/bootstrap-osd/
scp /tmp/ceph.mon.keyring root@ceph5:/tmp/ceph.mon.keyring
scp /etc/ceph/* root@ceph6:/etc/ceph/
scp /var/lib/ceph/bootstrap-osd/ceph.keyring root@ceph6:/var/lib/ceph/bootstrap-osd/
scp /tmp/ceph.mon.keyring root@ceph6:/tmp/ceph.mon.keyring
2) On ceph5, create the default data directory
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-ceph5
3) On ceph5, change the owner and group of ceph.mon.keyring to ceph
chown ceph:ceph /tmp/ceph.mon.keyring
4) Fetch the key and the monmap
# fetching the mon. key is optional here, since the keyring was already copied over
ceph auth get mon. -o /tmp/ceph.mon.keyring
exported keyring for mon.

# the current monmap is required for the initialization below
ceph mon getmap -o /tmp/ceph.mon.map
got monmap epoch 1
5) Initialize the mon
sudo -u ceph ceph-mon --mkfs -i ceph5 --monmap /tmp/ceph.mon.map --keyring /tmp/ceph.mon.keyring
6) Create an empty done file to mark the mon as deployed and prevent re-initialization
sudo touch /var/lib/ceph/mon/ceph-ceph5/done
7) Add the new mon to the cluster's mon map
ceph mon add ceph5 10.1.1.25:6789
8) Start the new mon
systemctl start ceph-mon@ceph5
systemctl status ceph-mon@ceph5
9) Enable the mon at boot
systemctl enable ceph-mon@ceph5
10) Repeat steps 2) through 9) on ceph6 with the hostname and IP adjusted, then check the cluster status with ceph -s
ceph -s
Note: on the Jewel release the status shows HEALTH_ERR at this point simply because no OSDs have been added yet.
III. Deploy the OSDs
1) Before adding any OSDs, create three buckets in the CRUSH map named ceph4, ceph5 and ceph6
Before adding the buckets:
ceph osd tree
ID CLASS WEIGHT TYPE NAME    STATUS REWEIGHT PRI-AFF
-1       0      root default

ceph osd crush add-bucket ceph4 host
added bucket ceph4 type host to crush map
ceph osd crush add-bucket ceph5 host
added bucket ceph5 type host to crush map
ceph osd crush add-bucket ceph6 host
added bucket ceph6 type host to crush map

After adding them:
ceph osd tree
ID CLASS WEIGHT TYPE NAME    STATUS REWEIGHT PRI-AFF
-4       0      host ceph6
-3       0      host ceph5
-2       0      host ceph4
-1       0      root default
2) Move the three buckets under the default root
ceph osd crush move ceph4 root=default
moved item id -2 name 'ceph4' to location {root=default} in crush map
ceph osd crush move ceph5 root=default
moved item id -3 name 'ceph5' to location {root=default} in crush map
ceph osd crush move ceph6 root=default
moved item id -4 name 'ceph6' to location {root=default} in crush map

After the move:
ceph osd tree
ID CLASS WEIGHT TYPE NAME       STATUS REWEIGHT PRI-AFF
-1       0      root default
-2       0          host ceph4
-3       0          host ceph5
-4       0          host ceph6
3) Prepare the disk; this alone completes adding the OSD
ceph-disk prepare /dev/sdb
Setting name!
partNum is 0
REALLY setting name!
The operation has completed successfully.
Setting name!
partNum is 1
REALLY setting name!
The operation has completed successfully.
The operation has completed successfully.
meta-data=/dev/sdb1    isize=2048   agcount=4, agsize=6400 blks
         =             sectsz=512   attr=2, projid32bit=1
         =             crc=1        finobt=0, sparse=0
data     =             bsize=4096   blocks=25600, imaxpct=25
         =             sunit=0      swidth=0 blks
naming   =version 2    bsize=4096   ascii-ci=0 ftype=1
log      =internal log bsize=4096   blocks=864, version=2
         =             sectsz=512   sunit=0 blks, lazy-count=1
realtime =none         extsz=4096   blocks=0, rtextents=0
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.

lsblk /dev/sdb
sdb      8:16   0    5G  0 disk
├─sdb1   8:17   0  100M  0 part /var/lib/ceph/osd/ceph-0
└─sdb2   8:18   0  4.9G  0 part
Note: on a Luminous deployment, steps 4) through 14) below must be skipped; ceph-disk has already created and mounted the data partition, and it must not be formatted again. They are kept here for reference with older releases.
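Step 15) below shows nine OSDs, three per host, so the prepare step is repeated for every data disk on every node. A sketch, assuming each host carries /dev/sdb, /dev/sdc and /dev/sdd as data disks (device names are hypothetical; adjust them to the actual hardware):
for h in ceph4 ceph5 ceph6; do
    for d in /dev/sdb /dev/sdc /dev/sdd; do
        ssh root@$h ceph-disk prepare $d   # prepare (and on Luminous auto-activate) each OSD
    done
done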
4) Create the first OSD
ceph osd create 0
5) Format the first partition
mkfs.xfs -f /dev/sdb1
6) Create the OSD's default data directory
mkdir -p /var/lib/ceph/osd/ceph-0
7) Mount the partition
mount /dev/sdb1 /var/lib/ceph/osd/ceph-0
8) Add an entry to /etc/fstab so the partition mounts at boot
echo "/dev/sdb1 /var/lib/ceph/osd/ceph-0 xfs defaults 0 0" >> /etc/fstab
9) Initialize the OSD data directory
ceph-osd -i 0 --mkfs --mkkey
10) Add the OSD's key
ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
added key for osd.0
11) Add the new OSD to the CRUSH map
ceph osd crush add osd.0 1.0 host=ceph4
12) Change the owner and group of the OSD data directory to ceph
chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
13) Start the new OSD
systemctl start ceph-osd@0
systemctl status ceph-osd@0
14) Enable the OSD at boot
systemctl enable ceph-osd@0
15) Check the OSD tree
ceph osd tree
ID CLASS WEIGHT  TYPE NAME       STATUS REWEIGHT PRI-AFF
-1       0.04312 root default
-2       0.01437     host ceph4
 0   hdd 0.00479         osd.0       up  1.00000 1.00000
 3   hdd 0.00479         osd.3       up  1.00000 1.00000
 6   hdd 0.00479         osd.6       up  1.00000 1.00000
-3       0.01437     host ceph5
 1   hdd 0.00479         osd.1       up  1.00000 1.00000
 4   hdd 0.00479         osd.4       up  1.00000 1.00000
 7   hdd 0.00479         osd.7       up  1.00000 1.00000
-4       0.01437     host ceph6
 2   hdd 0.00479         osd.2       up  1.00000 1.00000
 5   hdd 0.00479         osd.5       up  1.00000 1.00000
 8   hdd 0.00479         osd.8       up  1.00000 1.00000
16) Check the cluster status
ceph -s
  cluster:
    id:     1f0490cd-f938-4e20-8ea5-d817d941a6e6
    health: HEALTH_WARN
            no active mgr

  services:
    mon: 3 daemons, quorum ceph4,ceph5,ceph6
    mgr: no daemons active
    osd: 9 osds: 9 up, 9 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0B
    usage:   0B used, 0B / 0B avail
    pgs:
ceph df
GLOBAL:
    SIZE     AVAIL     RAW USED     %RAW USED
    0B       0B        0B           0
POOLS:
    NAME     ID     USED     %USED     MAX AVAIL     OBJECTS
Newer releases create no pools by default.
IV. Deploy mgr
1) Deploy on ceph4
ceph --cluster ceph auth get-or-create mgr.ceph4 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
[mgr.ceph4]
        key = AQBHFcxciXa/LRAAyuJBnVr8M2ICIwxNPw5XcQ==

mkdir /var/lib/ceph/mgr/ceph-ceph4/
ceph --cluster ceph auth get-or-create mgr.ceph4 -o /var/lib/ceph/mgr/ceph-ceph4/keyring
systemctl start ceph-mgr@ceph4
systemctl enable ceph-mgr@ceph4
2) Deploy on ceph5
ceph --cluster ceph auth get-or-create mgr.ceph5 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
mkdir /var/lib/ceph/mgr/ceph-ceph5/
ceph --cluster ceph auth get-or-create mgr.ceph5 -o /var/lib/ceph/mgr/ceph-ceph5/keyring
systemctl start ceph-mgr@ceph5
systemctl enable ceph-mgr@ceph5
3) Deploy on ceph6
ceph --cluster ceph auth get-or-create mgr.ceph6 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
mkdir /var/lib/ceph/mgr/ceph-ceph6/
ceph --cluster ceph auth get-or-create mgr.ceph6 -o /var/lib/ceph/mgr/ceph-ceph6/keyring
systemctl start ceph-mgr@ceph6
systemctl enable ceph-mgr@ceph6
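After all three mgrs are up, ceph -s should show one active mgr and two standbys. Optionally, the Luminous dashboard module can be enabled from any node; it serves a read-only web UI, by default on port 7000 of the active mgr:
ceph mgr module enable dashboard
ceph mgr services    # prints the URL of the active mgr's dashboard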
This completes the initial RADOS deployment.
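As a final smoke test, a pool can be created and an object written and read back; the pool name test here is arbitrary:
ceph osd pool create test 64 64               # 64 PGs / 64 PGPs, matching the defaults above
ceph osd pool application enable test rbd     # Luminous warns about pools without an application tag
echo hello > /tmp/obj.txt
rados -p test put obj1 /tmp/obj.txt           # write one object
rados -p test ls                              # should list obj1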