Highly Available OpenStack (Queens) Cluster - 9. Cinder Control Node Cluster


References:

  1. Install Guide: https://docs.openstack.org/install-guide/
  2. OpenStack High Availability Guide: https://docs.openstack.org/ha-guide/index.html
  3. Understanding Pacemaker: http://www.cnblogs.com/sammyliu/p/5025362.html

XIII. Cinder Control Node Cluster

1. Create the cinder database

# Create the database on any control node; the data is synchronized automatically to the other nodes. controller01 is used as the example;
[root@controller01 ~]# mysql -uroot -pmysql_pass

MariaDB [(none)]> CREATE DATABASE cinder;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder_dbpass';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder_dbpass';

MariaDB [(none)]> flush privileges;
MariaDB [(none)]> exit;
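
As an optional sanity check (not part of the original steps), you can confirm that the grants allow the cinder account to reach the new database through the VIP; the host name and credentials below follow the values used above:

# Connect as the cinder user from any control node
[root@controller01 ~]# mysql -h controller -ucinder -pcinder_dbpass -e "SHOW DATABASES;"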

2. Create cinder-api

# Perform on any control node; controller01 is used as the example;
# Calling the cinder service requires credentials; just load the environment variable script
[root@controller01 ~]# . admin-openrc

1) Create the cinder user

# The service project was already created in the glance chapter;
# The cinder user is created in the "default" domain
[root@controller01 ~]# openstack user create --domain default --password=cinder_pass cinder
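
If you want to confirm the user was created, an optional check (not in the original guide) is:

[root@controller01 ~]# openstack user show cinder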

2) Grant privileges to cinder

# Grant the admin role to the cinder user
[root@controller01 ~]# openstack role add --project service --user cinder admin
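
The role add command prints nothing on success; an optional way to double-check the assignment (assuming a recent openstack CLI) is:

[root@controller01 ~]# openstack role assignment list --user cinder --project service --names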

3) Create the cinder service entities

# The cinder service entity types are volumev2 / volumev3;
# Create both the v2 and v3 service entities
[root@controller01 ~]# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
[root@controller01 ~]# openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
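
Optionally confirm that both service entities are registered:

[root@controller01 ~]# openstack service list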

4) Create the cinder-api endpoints

# Note that --region must match the region generated when the admin user was initialized;
# The API addresses all use the VIP; if public/internal/admin use different VIPs, distinguish them accordingly;
# The cinder API service types are volumev2 / volumev3;
# The endpoint URL ends with the project ID, which can be viewed with "openstack project list"
# v2 public api
[root@controller01 ~]# openstack endpoint create --region RegionTest volumev2 public http://controller:8776/v2/%\(project_id\)s

# v2 internal api
[root@controller01 ~]# openstack endpoint create --region RegionTest volumev2 internal http://controller:8776/v2/%\(project_id\)s

# v2 admin api
[root@controller01 ~]# openstack endpoint create --region RegionTest volumev2 admin http://controller:8776/v2/%\(project_id\)s

# v3 public api
[root@controller01 ~]# openstack endpoint create --region RegionTest volumev3 public http://controller:8776/v3/%\(project_id\)s

# v3 internal api
[root@controller01 ~]# openstack endpoint create --region RegionTest volumev3 internal http://controller:8776/v3/%\(project_id\)s

# v3 admin api
[root@controller01 ~]# openstack endpoint create --region RegionTest volumev3 admin http://controller:8776/v3/%\(project_id\)s
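
After all six endpoints are created, an optional quick listing helps confirm that the public/internal/admin interfaces all point at the VIP:

# List only the cinder endpoints
[root@controller01 ~]# openstack endpoint list --service volumev2
[root@controller01 ~]# openstack endpoint list --service volumev3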

3. Install cinder

# Install the cinder service on all control nodes; controller01 is used as the example
[root@controller01 ~]# yum install openstack-cinder -y

4. Configure cinder.conf

# Perform on all control nodes; controller01 is used as the example;
# Note the "my_ip" parameter; adjust it per node;
# Note the ownership of the cinder.conf file: root:cinder
[root@controller01 ~]# cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
[root@controller01 ~]# egrep -v "^$|^#" /etc/cinder/cinder.conf
[DEFAULT]
state_path = /var/lib/cinder
my_ip = 172.30.200.31
glance_api_servers = http://controller:9292
auth_strategy = keystone
osapi_volume_listen = $my_ip
osapi_volume_listen_port = 8776
log_dir = /var/log/cinder
# When fronted by haproxy, services connecting to rabbitmq may hit connection timeouts and reconnect; this can be seen in the logs of each service and of rabbitmq;
# transport_url = rabbit://openstack:rabbitmq_pass@controller:5673
# rabbitmq has its own clustering mechanism, and the official documentation recommends connecting to the rabbitmq cluster directly; however, with that approach services occasionally fail on startup for unclear reasons. If you do not see this issue, it is strongly recommended to connect to the rabbitmq cluster directly rather than going through the haproxy front end
transport_url=rabbit://openstack:rabbitmq_pass@controller01:5672,controller02:5672,controller03:5672
[backend]
[backend_defaults]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[database]
connection = mysql+pymysql://cinder:cinder_dbpass@controller/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller01:11211,controller02:11211,controller03:11211
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = cinder_pass
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = $state_path/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[profiler]
[service_user]
[ssl]
[vault]
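
To apply the same configuration on controller02/03 without editing each file by hand, one option is to copy the file and then adjust my_ip per node. This is only a sketch: the IPs 172.30.200.32/33 for controller02/03 are assumptions, so substitute the actual management IPs of your nodes:

# Copy the configuration to the remaining control nodes and fix my_ip (IPs below are assumptions, adjust to your environment)
[root@controller01 ~]# scp /etc/cinder/cinder.conf controller02:/etc/cinder/cinder.conf
[root@controller01 ~]# scp /etc/cinder/cinder.conf controller03:/etc/cinder/cinder.conf
[root@controller01 ~]# ssh controller02 "sed -i 's/^my_ip = .*/my_ip = 172.30.200.32/' /etc/cinder/cinder.conf; chown root:cinder /etc/cinder/cinder.conf"
[root@controller01 ~]# ssh controller03 "sed -i 's/^my_ip = .*/my_ip = 172.30.200.33/' /etc/cinder/cinder.conf; chown root:cinder /etc/cinder/cinder.conf"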

5. Configure nova.conf

# Perform on all control nodes; controller01 is used as the example;
# The change only involves the "[cinder]" section of nova.conf;
# Add the corresponding region
[root@controller01 ~]# vim /etc/nova/nova.conf
[cinder]
os_region_name=RegionTest

6. Synchronize the cinder database

# Perform on any control node;
# Some "deprecation" messages can be ignored
[root@controller01 ~]# su -s /bin/sh -c "cinder-manage db sync" cinder

# Verify
[root@controller01 ~]# mysql -h controller -ucinder -pcinder_dbpass -e "use cinder;show tables;"

7. Start the services

# Perform on all control nodes;
# Since the nova configuration file was changed, restart the nova service first
[root@controller01 ~]# systemctl restart openstack-nova-api.service


# Enable at boot
[root@controller01 ~]# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service

# Start
[root@controller01 ~]# systemctl restart openstack-cinder-api.service
[root@controller01 ~]# systemctl restart openstack-cinder-scheduler.service
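
To confirm that cinder-api came up and is listening on port 8776 (osapi_volume_listen_port above), an optional check on each node:

[root@controller01 ~]# ss -tnlp | grep 8776
[root@controller01 ~]# systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service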

8. Verify

[root@controller01 ~]# . admin-openrc 

# View the agent services;
# Alternatively: cinder service-list
[root@controller01 ~]# openstack volume service list

9. Configure pcs resources

# Perform on any control node;
# Add the cinder-api and cinder-scheduler resources
[root@controller01 ~]# pcs resource create openstack-cinder-api systemd:openstack-cinder-api --clone interleave=true
[root@controller01 ~]# pcs resource create openstack-cinder-scheduler systemd:openstack-cinder-scheduler --clone interleave=true

# cinder-api and cinder-scheduler run in active/active mode;
# openstack-cinder-volume runs in active/passive mode

# View the resources
[root@controller01 ~]# pcs resource
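
For a fuller view, pcs status can also be used; the new openstack-cinder-api-clone and openstack-cinder-scheduler-clone resource sets (the -clone suffix is the pcs default naming) should show as Started on all three controllers:

[root@controller01 ~]# pcs status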

