1.1 環境准備
|主機名|IP地址|角色
|:-|:-|:-|
|ceph-admin|192.168.16.220|manager|
|ceph-node1,ceph-mon|192.168.16.221|node|
|ceph-node2,ceph-mon|192.168.16.222|node|
|ceph-node3,ceph-mon|192.168.16.223|node|
1.1.1 配置ssh密鑰訪問
# Generate the admin node's SSH key pair, then push the public key to every node.
# ssh-copy-id does not understand IP ranges like 192.168.16.221-223, and -i
# expects an identity file — loop over the nodes instead.
ssh-keygen -f ~/.ssh/id_rsa
for n in 1 2 3; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.16.22$n
done
1.1.2 添加hosts,配置主機名
cat /etc/hosts
192.168.16.220 ceph-admin
192.168.16.221 ceph-node1 ceph-mon
192.168.16.222 ceph-node2 ceph-mon
192.168.16.223 ceph-node3 ceph-mon
# Distribute /etc/hosts and set each node's hostname.
# Fix: the original line ended the ssh command with `\` and no separator, so
# `done` was passed to ssh as an argument and the loop never terminated cleanly.
for n in 1 2 3; do
  scp /etc/hosts 192.168.16.22$n:/etc/hosts
  ssh 192.168.16.22$n "hostnamectl set-hostname ceph-node$n"
done
1.1.3 關閉firewalld、selinux
# Stop/disable firewalld and permanently disable SELinux on every node.
# Idiom fix: deprecated backtick `seq 3` replaced with a literal list.
for n in 1 2 3; do
  ssh 192.168.16.22$n "systemctl stop firewalld; \
    systemctl disable firewalld; \
    sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config; \
    setenforce 0"
done
1.1.4 時間更新同步
# Install ntpdate and sync time on every node (Ceph monitors are sensitive to clock skew).
# Fix: the original had no command separator before `done`, so `done` was passed
# to ssh as an extra argument instead of closing the loop.
for n in 1 2 3; do
  ssh 192.168.16.22$n "yum install ntpdate -y; ntpdate asia.pool.ntp.org"
done
1.1.5 替換repo
# Point ceph-deploy at the local Jewel package mirror and its signing key.
CEPH_DEPLOY_REPO_URL=http://172.18.210.253/repo/ceph-el7/jewel
CEPH_DEPLOY_GPG_URL=http://172.18.210.253/repo/ceph-el7/jewel/release.asc
export CEPH_DEPLOY_REPO_URL CEPH_DEPLOY_GPG_URL
注:如果本地源有問題或不在本地時,使用國內的源即可
# Fallback: use the 163.com public Jewel mirror when the local repo is unavailable.
CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/rpm-jewel/el7
CEPH_DEPLOY_GPG_URL=http://mirrors.163.com/ceph/keys/release.asc
export CEPH_DEPLOY_REPO_URL CEPH_DEPLOY_GPG_URL
1.2 安裝 ceph
1.2.1 安裝ceph管理工具ceph-deploy
yum install -y ceph-deploy
1.2.2 創建工作目錄
mkdir /ceph ; cd /ceph
1.2.3 安裝ceph客戶端
yum install -y ceph
或者 在管理節點上執行
ceph-deploy install ceph-admin ceph-node1 ceph-node2 ceph-node3
1.2.4 創建ceph集群
ceph-deploy new ceph-node1 ceph-node2 ceph-node3 #建議是奇數
# cat ceph.conf
fsid = 7e1daeea-417e-43e3-a2fe-56d9444f2fbf
mon_initial_members = ceph-node1, ceph-node2, ceph-node3
mon_host = 192.168.16.221,192.168.16.222,192.168.16.223
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
rbd_default_features = 1
mon clock drift allowed = 2
mon clock drift warn backoff = 30
注意:
1、部分操作系統kernel只支持layering,所以最好直接在配置文件指明創建rbd時默認支持的特性
rbd_default_features = 1
2、由於ceph分布式對時鍾的同步要求很高,可以將同步誤差范圍調大;
mon clock drift allowed = 2
mon clock drift warn backoff = 30
ceph-deploy mon create-initial
1.2.5 創建osd
- 創建osd有兩種方式
1、使用系統裸盤,作為存儲空間;
2、使用現有文件系統,以目錄或分區作為存儲空間,官方建議為 OSD 及其日志使用獨立硬盤或分區作為存儲空間
1.2.5.1 使用分區
# Destroy any existing partition table/data on /dev/sdb1 of each node (destructive!).
ceph-deploy disk zap ceph-node1:/dev/sdb1 ceph-node2:/dev/sdb1 ceph-node3:/dev/sdb1
# Format each partition and lay down the OSD data structures.
ceph-deploy osd prepare ceph-node1:/dev/sdb1 ceph-node2:/dev/sdb1 ceph-node3:/dev/sdb1
# Bring the prepared OSDs online so they join the cluster map.
ceph-deploy osd activate ceph-node1:/dev/sdb1 ceph-node2:/dev/sdb1 ceph-node3:/dev/sdb1
1.2.5.2 使用目錄
# Create the OSD data directories and hand them to the ceph user.
# Fixes: curly "smart quotes" replaced with ASCII double quotes, and the
# mkdir path /datal/... corrected to /data/... to match the chown target and
# the ceph-deploy osd prepare paths below.
ssh ceph-node1 "mkdir -p /data/osd0; chown -R ceph:ceph /data/osd0"
ssh ceph-node2 "mkdir -p /data/osd1; chown -R ceph:ceph /data/osd1"
ssh ceph-node3 "mkdir -p /data/osd2; chown -R ceph:ceph /data/osd2"
# Prepare the directory-backed OSDs, then activate them.
# Fix: 'acivate' typo corrected to 'activate' (the original command would fail).
ceph-deploy osd prepare ceph-node1:/data/osd0 ceph-node2:/data/osd1 ceph-node3:/data/osd2
ceph-deploy osd activate ceph-node1:/data/osd0 ceph-node2:/data/osd1 ceph-node3:/data/osd2
1.2.6 賦予管理員權限
ceph-deploy admin ceph-admin
# ceph -s
cluster 7e1daeea-417e-43e3-a2fe-56d9444f2fbf
health HEALTH_OK
monmap e1: 3 mons at {ceph-node1=192.168.16.221:6789/0,ceph-node2=192.168.16.222:6789/0,ceph-node3=192.168.16.223:6789/0}
election epoch 4, quorum 0,1,2 ceph-node1,ceph-node2,ceph-node3
osdmap e14: 3 osds: 3 up, 3 in
flags sortbitwise,require_jewel_osds
pgmap v24: 64 pgs, 1 pools, 0 bytes data, 0 objects
15460 MB used, 2742 GB / 2757 GB avail
64 active+clean
# ceph health
HEALTH_OK
1.2.7 創建pool
ceph osd pool create image 64
ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
ceph建好后默認有個rbd池,可以考慮刪除
rbd create test --size 1024 -p image
注:創建一個鏡像,-p參數指定池的名稱,--size 單位為MB
1.3 常用操作
1.3.1 ceph reset
ceph-deploy purge 節點1 節點2 ....
ceph-deploy purgedata 節點1 節點2 ....
ceph-deploy forgetkeys
1.3.2 常用命令
rados lspools 查看池子
ceph -s 或 ceph status 查看集群狀態
ceph -w 觀察集群健康狀態
ceph quorum_status --format json-pretty 檢查ceph monitor仲裁狀態
ceph df 檢查集群使用情況
ceph mon stat 檢查monitor狀態
ceph osd stat 檢查osd狀態
ceph pg stat 檢查pg配置組狀態
ceph pg dump 列出PG
ceph osd lspools 列出存儲池
ceph osd tree 檢查osd的crush map
ceph auth list 列出集群的認證密鑰
ceph osd df 獲取每個osd上pg的數量（PGS 列）
