Ceph集群部署(基於Luminous版)


環境

兩個節點:ceph0、ceph1

ceph0: mon.a、mds、mgr、osd.0、osd.1
ceph1: mon.b、osd.2、osd.3

操作系統:ubuntu14.04
網絡配置:

ceph0: 管理網絡,eth0,192.168.31.100
       存儲網絡,eth1, 172.16.31.100
ceph1: 管理網絡,eth0,192.168.31.101
       存儲網絡,eth1, 172.16.31.101

安裝

root@:~# wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
root@:~# echo deb http://download.ceph.com/debian-luminous/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
root@:~# apt-get update && apt-get upgrade
root@:~# apt-get install ceph-mds ceph

部署mon

兩台機器都部署mon: a、b

mon.a

配置:

[global]
max open files = 131072
fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd pool default size = 3
# 注意(review):本例只有兩台主機且 crush chooseleaf type = 1(按 host 分布),
# size = 3 時 PG 無法放滿三副本,會一直處於 undersized;建議改為 2 或增加主機。
osd pool default min size = 2
osd pool default pg num = 1024
osd pool default pgp num = 1024
osd crush chooseleaf type = 1
mon osd full ratio = .95
mon osd nearfull ratio = .85
public network = 192.168.31.0/24
cluster network = 172.16.31.0/24
[mon]
mon initial members = a
mon data = /var/lib/ceph/mon/$cluster-$id
mon allow pool delete = true
[mon.a]
host = ceph0
mon addr = 192.168.31.100:6789

為群集創建密鑰環, 並生成monitor secret key:

root@:~# sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'

生成管理員密鑰環,生成client.admin用戶並將該用戶添加到密鑰環:

root@:~#  sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'

生成bootstrap-osd密鑰環,生成一個client.bootstrap-osd用戶並將該用戶添加到密鑰環:

root@:~#  sudo -u ceph ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'

將管理員密鑰環、bootstrap-osd密鑰環添加到ceph.mon.keyring:

root@:~#  sudo -u ceph ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring 
root@:~#  sudo -u ceph ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring

用規划好的主機名、對應IP地址、和 fsid 生成一個monitor map,並保存為 /etc/ceph/monmap:

root@:~#  sudo -u ceph monmaptool --create --add a 192.168.31.100 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /etc/ceph/monmap 

創建monitor數據目錄:

root@:~#  sudo -u ceph mkdir /var/lib/ceph/mon/ceph-a

用monitor map和密鑰環初始化數據:

root@:~# sudo -u ceph ceph-mon --cluster ceph --mkfs -i a --monmap /etc/ceph/monmap --keyring /etc/ceph/ceph.mon.keyring

啟動mon.a:

root@:~# /etc/init.d/ceph start mon.a

mon.b

添加配置,並同步到新節點:

[mon]
mon initial members = a b
[mon.b]
host = ceph1
mon addr = 192.168.31.101:6789

/etc/ceph/monmap /etc/ceph/ceph.client.admin.keyring /etc/ceph/ceph.mon.keyring 也一起同步到新節點
獲取集群已有的mon.keyring:

root@:~# sudo -u ceph ceph auth get mon. -o /tmp/mon.keyring

獲取集群已有的mon.map:

root@:~# sudo -u ceph ceph mon getmap -o /tmp/mon.map

將/tmp/mon.keyring、/tmp/mon.map同步到新節點
新節點創建mon目錄:

root@:~# sudo -u ceph mkdir /var/lib/ceph/mon/ceph-b 

用monitor map和密鑰環初始化新節點mon數據:

root@:~# sudo -u ceph ceph-mon --cluster ceph --mkfs -i b --monmap /tmp/mon.map  --keyring /tmp/mon.keyring 

#或者
root@:~# sudo -u ceph ceph-mon --cluster ceph --mkfs -i b --monmap /etc/ceph/monmap  --keyring /etc/ceph/ceph.mon.keyring 

啟動新節點:

root@:~# /etc/init.d/ceph start mon.b 

查看:

root@:~# ceph -s 
  services:
    mon: 2 daemons, quorum a,b

部署OSD

配置:

[osd.0]
host =  ceph0
devs = /dev/sdb1
public addr = 192.168.31.100
cluster addr = 172.16.31.100
[osd.1]
host =  ceph0
devs = /dev/sdc1
public addr = 192.168.31.100
cluster addr = 172.16.31.100
[osd.2]
host =  ceph1
devs = /dev/sdb1
public addr = 192.168.31.101
cluster addr = 172.16.31.101
[osd.3]
host =  ceph1
devs = /dev/sdc1
public addr = 192.168.31.101
cluster addr = 172.16.31.101

將配置文件同步到各個OSD節點,同時需要同步:
/etc/ceph/ceph.client.admin.keyring

/var/lib/ceph/bootstrap-osd/ceph.keyring

/etc/ceph/ceph.mon.keyring

方法一:

每個節點都用xfs格式化好硬盤,並掛載到對應的OSD目錄

root@ceph0:~# mkfs.xfs  /dev/sdb1 
root@ceph0:~# mkfs.xfs  /dev/sdc1 
root@ceph0:~# mkdir -p /var/lib/ceph/osd/ceph-0/
root@ceph0:~# mkdir -p /var/lib/ceph/osd/ceph-1/
root@ceph0:~# mount /dev/sdb1 /var/lib/ceph/osd/ceph-0/
root@ceph0:~# mount /dev/sdc1 /var/lib/ceph/osd/ceph-1/

部署osd.0
ps:無論osd在哪個節點上,ceph osd create都要在mon所在服務器上執行

root@ceph0:~# ceph osd create  
root@ceph0:~# ceph-osd -i 0 --mkfs --mkkey   
root@ceph0:~# ceph auth add osd.0 osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-0/keyring 
root@ceph0:~# ceph osd crush add osd.0 0.2 root=default host=ceph0
root@ceph0:~# ceph-osd -i 0

同理部署其他osd
ps: 不同服務器時,需先復制ceph.conf到對應服務器上

root@ceph0:~# ceph osd tree
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-1 0.79999 root default                                     
-2 0.39999     host ceph0                                   
 0 0.20000         osd.0       up  1.00000          1.00000 
 1 0.20000         osd.1       up  1.00000          1.00000 
-3 0.39999     host ceph1                                   
 2 0.20000         osd.2       up  1.00000          1.00000 
 3 0.20000         osd.3       up  1.00000          1.00000 

方法二:

方法二直接參考自官網:https://docs.ceph.com/docs/master/install/manual-deployment/

為OSD生成UUID:

root@ceph0:~# UUID=$(uuidgen)

為OSD生成一個cephx密鑰:

root@ceph0:~# OSD_SECRET=$(ceph-authtool --gen-print-key)

創建OSD:

root@ceph0:~# ID=$(echo "{\"cephx_secret\": \"$OSD_SECRET\"}" | \
   ceph osd new $UUID -i - \
   -n client.bootstrap-osd -k /var/lib/ceph/bootstrap-osd/ceph.keyring)

創建OSD數據目錄,並格式化掛載設備到數據目錄

root@ceph0:~# mkdir /var/lib/ceph/osd/ceph-$ID
root@ceph0:~# mkfs.xfs /dev/${DEV}
root@ceph0:~# mount /dev/${DEV} /var/lib/ceph/osd/ceph-$ID

將秘鑰寫入OSD keyring文件:

root@ceph0:~# ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-$ID/keyring \
     --name osd.$ID --add-key $OSD_SECRET

初始化OSD數據目錄

root@ceph0:~# ceph-osd -i $ID --mkfs --osd-uuid $UUID
root@ceph0:~# chown -R ceph:ceph /var/lib/ceph/osd/ceph-$ID

啟動OSD:

root@ceph0:~# ceph-osd -i $ID #或者:/etc/init.d/ceph start osd.$ID

查看:

root@ceph0:~# ceph osd tree
ID CLASS WEIGHT  TYPE NAME      STATUS REWEIGHT PRI-AFF 
-1       0.34639 root default                           
-2       0.24879     host ceph0                         
 0   hdd 0.20000         osd.0      up  1.00000 1.00000 
 1   hdd 0.04880         osd.1      up  1.00000 1.00000 
-5       0.09760     host ceph1                         
 2   hdd 0.04880         osd.2      up  1.00000 1.00000 
 3   hdd 0.04880         osd.3      up  1.00000 1.00000 

OSD應用:

創建存儲池:

root@ceph0:~# rados mkpool database_pool

在該存儲池里新建鏡像:

root@ceph0:~# rbd create database_pool/db0 --size 4096 --image-feature layering

映射到塊設備中:

root@ceph0:~# rbd map database_pool/db0
/dev/rbd0
root@ceph0:~# rbd showmapped 
id pool          image snap device    
0  database_pool db0   -    /dev/rbd0 

格式化並掛載:

root@ceph0:~# mkfs.xfs /dev/rbd0 
root@ceph0:~# mount /dev/rbd0 /rbdfs

部署mgr

配置:

[mgr]
mgr initial modules = dashboard balancer
mgr data = /var/lib/ceph/mgr/$cluster-$id

創建mgr數據目錄:

root@:~# sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-m/

create an authentication key for your daemon:

root@:~# sudo -u ceph ceph auth get-or-create mgr.m  mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-m/keyring

啟動mgr:

root@:~# ceph-mgr --cluster ceph  -i m

列出mgr module

root@:~# ceph mgr module ls
{
    "enabled_modules": [
        "balancer",
        "restful",
        "status"
    ],
    "disabled_modules": [
        "dashboard",
        "influx",
        "localpool",
        "prometheus",
        "selftest",
        "zabbix"
    ]
}

啟用dashboard

root@:~#  ceph mgr module enable dashboard

列出由mgr模塊提供的服務endpoint

root@:~# ceph mgr services
{
    "dashboard": "http://ceph0:7000/"
}

部署mds

配置:

[mds.m]
host = ceph0

創建mds數據目錄:

root@:~#  sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-m/

創建秘鑰環:

root@:~#   ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-m/keyring --gen-key -n mds.m

導入秘鑰環:

root@:~# ceph auth add mds.m osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-m/keyring

啟動mds:

root@:~# ceph-mds --cluster ceph -i m -m 192.168.31.100:6789  

cephfs應用:

新建cephfs:

root@ceph0:~# ceph osd pool create cephfs_data 64
root@ceph0:~# ceph osd pool create cephfs_m 16
root@ceph0:~# ceph fs new mycephfs cephfs_m cephfs_data 
root@ceph0:~# ceph fs ls
name: mycephfs, metadata pool: cephfs_m, data pools: [cephfs_data ]

掛載:

root@ceph0:~# mount -t ceph 192.168.31.100:6789:/ /cephfs/ -o name=admin,secret=$(cat /etc/ceph/ceph.client.admin.keyring  | grep -A1 "\[client.admin\]" | awk -F"[ =]+" '/key/{print $2}')

因為開啟了cephx認證,所以mount需要添加-o name=admin,secret=xxxx進行認證,否則直接掛載會報如下錯誤:

root@ceph0:~# mount -t ceph 192.168.31.100:6789:/ /cephfs/
mount error 22 = Invalid argument
root@ceph0:~# dmesg | tail
[346660.559547] libceph: error -22 on auth protocol 2 init
[346660.560991] libceph: client24278 fsid a7f64266-0894-4f1e-a635-d0aeaca0e993

一些報錯的解決

rbd: map failed: (110) Connection timed out:

root@ceph0:~# rbd map database-pool/db1 
rbd: sysfs write failed
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (110) Connection timed out
root@ceph0:~# dmesg | tail
[343915.358184] libceph: mon1 192.168.31.101:6789 feature set mismatch, my 106b84a842a42 < server's 40106b84a842a42, missing 400000000000000
[343915.363139] libceph: mon1 192.168.31.101:6789 missing required protocol features

解決:

root@ceph0:~# ceph osd pool set database-pool  hashpspool false  --yes-i-really-mean-it 
set pool 3 hashpspool to false
root@ceph0:~# 
root@ceph0:~# rbd map database-pool/db1                             
/dev/rbd0

參考鏈接:http://cephnotes.ksperis.com/blog/2014/01/21/feature-set-mismatch-error-on-ceph-kernel-client

另一種解決方式,用nbd:

root@ceph0:~# apt-get install rbd-nbd
root@ceph0:~# rbd-nbd map  db_pool/db0 
/dev/nbd0

參考鏈接:
https://blog.csdn.net/ygtlovezf/article/details/79107755
https://www.jianshu.com/p/bb9d14bd897c


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM