19. Glance Integration with Ceph
1. Configure glance-api.conf
# Modify glance-api.conf on every node running the glance-api service (all 3 controller nodes); controller01 is used as the example.
# Only the sections involved in integrating glance with ceph are listed below.
[root@controller01 ~]# vim /etc/glance/glance-api.conf

# Enable copy-on-write cloning of images
[DEFAULT]
show_image_direct_url = True

# Switch the default backend from local file storage to ceph rbd;
# note: the pool name (images) and user (glance) must match what was created on the ceph cluster
[glance_store]
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
stores = rbd
default_store = rbd
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf

# After changing the configuration, restart the services
[root@controller01 ~]# systemctl restart openstack-glance-api.service
[root@controller01 ~]# systemctl restart openstack-glance-registry.service
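Before restarting, it may be worth confirming that the glance user can actually reach the images pool. A minimal check, assuming the client.glance keyring created in the earlier Ceph section has already been distributed to the controller nodes:

# The keyring should be present; rbd picks it up automatically when --id glance is given
[root@controller01 ~]# ls /etc/ceph/ceph.client.glance.keyring
# List the images pool as the glance user; no error (an empty listing is fine) means access works
[root@controller01 ~]# rbd ls --id glance -p images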
2. Upload an Image
# After upload, the image is stored in the images pool of the ceph cluster by default
[root@controller01 ~]# openstack image create "cirros-qcow2" \
--file ~/cirros-0.3.5-x86_64-disk.img \
--disk-format qcow2 --container-format bare \
--public

# Check
[root@controller01 ~]# rbd ls images
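As an optional sanity check (a sketch, assuming the rbd image in the images pool is named after the Glance image UUID, which is the default behaviour of the rbd store), the two IDs can be compared directly:

[root@controller01 ~]# openstack image show cirros-qcow2 -f value -c id
[root@controller01 ~]# rbd info images/<image-id>    # replace <image-id> with the UUID printed by the previous command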
3. Define the Pool Application Type
# Once the images pool is in use, the ceph cluster status changes to HEALTH_WARN
[root@controller01 ~]# ceph -s

# "ceph health detail" shows the cause and the suggested fix:
# the pool application type has not been defined; it can be set to 'cephfs', 'rbd', 'rgw', etc.
[root@controller01 ~]# ceph health detail

# Fix the volumes and vms pools at the same time
[root@controller01 ~]# ceph osd pool application enable images rbd
[root@controller01 ~]# ceph osd pool application enable volumes rbd
[root@controller01 ~]# ceph osd pool application enable vms rbd

# Verify
[root@controller01 ~]# ceph health detail
[root@controller01 ~]# ceph osd pool application get images
[root@controller01 ~]# ceph osd pool application get volumes
[root@controller01 ~]# ceph osd pool application get vms
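With the application type set, "ceph -s" should return to HEALTH_OK, and the get command typically prints output along these lines (exact formatting depends on the Ceph release):

[root@controller01 ~]# ceph osd pool application get images
{
    "rbd": {}
}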
20. Cinder Integration with Ceph
1. Configure cinder.conf
# cinder uses a pluggable driver architecture and can use several storage backends at the same time;
# it is enough to configure the ceph rbd driver in cinder.conf on the nodes running cinder-volume;
# there are 3 compute (storage) nodes; compute01 is used as the example.
# Only the sections involved in integrating cinder with ceph are listed below.
[root@compute01 ~]# vim /etc/cinder/cinder.conf

# Use ceph as the backend storage
[DEFAULT]
enabled_backends = ceph

# Add a new [ceph] section;
# note: the pool, user and secret uuid must match what was created on the ceph cluster
[ceph]
# ceph rbd driver
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
# with multiple backends, "glance_api_version" must be set in the [DEFAULT] section
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = 10744136-583f-4a9c-ae30-9bfb3515526b
volume_backend_name = ceph

# After changing the configuration, restart the cinder-volume service
[root@compute01 ~]# systemctl restart openstack-cinder-volume.service
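Before or right after restarting cinder-volume, two quick checks can save debugging time. This is a sketch that assumes the client.cinder keyring and the libvirt secret from the earlier Ceph section are already in place on the compute node:

# The cinder user must be able to reach the volumes pool
[root@compute01 ~]# rbd ls --id cinder -p volumes
# The UUID listed by libvirt should match rbd_secret_uuid in cinder.conf (and nova.conf)
[root@compute01 ~]# virsh secret-list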
2. Verify
# Check the cinder service status; after cinder-volume is integrated with ceph, its state should be "up";
# alternatively: cinder service-list
[root@controller01 ~]# openstack volume service list
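With enabled_backends set, the cinder-volume entry is reported per backend, so the Host column typically shows the hostname with the backend name appended. Illustrative output (values are examples only, columns trimmed):

+---------------+----------------+------+---------+-------+
| Binary        | Host           | Zone | Status  | State |
+---------------+----------------+------+---------+-------+
| cinder-volume | compute01@ceph | nova | enabled | up    |
+---------------+----------------+------+---------+-------+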
3. Create a Volume
1) Set the Volume Type
# On the controller node, create a volume type for cinder's ceph backend; with multiple backends the type is what distinguishes them;
# it can be listed with "cinder type-list"
[root@controller01 ~]# cinder type-create ceph

# Set an extra spec on the ceph type: key "volume_backend_name", value "ceph"
[root@controller01 ~]# cinder type-key ceph set volume_backend_name=ceph
[root@controller01 ~]# cinder extra-specs-list
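The same association can also be inspected with the unified openstack client (assuming the type was created as above):

[root@controller01 ~]# openstack volume type show ceph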
2) Create the Volume
# Create the volume;
# the trailing "1" is the size in GB
[root@controller01 ~]# cinder create --volume-type ceph --name ceph-volume1 1

# Check the new volume;
# alternatively: cinder list
[root@controller01 ~]# openstack volume list

# Check the volumes pool of the ceph cluster
[root@controller01 ~]# rbd ls volumes
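In the volumes pool the rbd image is named volume-<volume-id>, so the cinder volume UUID can be matched against the rbd listing. A quick check might look like this (the <volume-id> placeholder is whatever the first command returns):

[root@controller01 ~]# openstack volume show ceph-volume1 -f value -c id
[root@controller01 ~]# rbd info volumes/volume-<volume-id>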