Reference documents:
- Install Guide: https://docs.openstack.org/install-guide/
- OpenStack High Availability Guide: https://docs.openstack.org/ha-guide/index.html
- Understanding Pacemaker: http://www.cnblogs.com/sammyliu/p/5025362.html
- Ceph: http://docs.ceph.com/docs/master/start/intro/
10. Nova Control Node Cluster
1. Create the nova databases
# Create the databases on any controller node; the data is synchronized to the other nodes automatically. controller01 is used as the example;
# nova uses 4 databases, all granted to the single nova user;
# placement handles resource tracking; its most frequently used API calls retrieve candidate resources and claim resources
[root@controller01 ~]# mysql -u root -pmysql_pass
MariaDB [(none)]> CREATE DATABASE nova_api;
MariaDB [(none)]> CREATE DATABASE nova;
MariaDB [(none)]> CREATE DATABASE nova_cell0;
MariaDB [(none)]> CREATE DATABASE nova_placement;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'nova_dbpass';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova_dbpass';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova_dbpass';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova_dbpass';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'nova_dbpass';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova_dbpass';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_placement.* TO 'nova'@'localhost' IDENTIFIED BY 'nova_dbpass';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_placement.* TO 'nova'@'%' IDENTIFIED BY 'nova_dbpass';
MariaDB [(none)]> flush privileges;
MariaDB [(none)]> exit;
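Because the database back end is a Galera cluster, the new databases and grants should be visible on every controller immediately. A quick sanity check (a sketch; assumes the nova grants above were applied and controller02 is reachable):
[root@controller01 ~]# mysql -h controller02 -u nova -pnova_dbpass -e "show databases;"
# expect nova, nova_api, nova_cell0 and nova_placement in the output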
2. Create the nova/placement API
# Run on any controller node; controller01 is used as the example;
# calling the nova services requires authentication; load the environment variable script first
[root@controller01 ~]# . admin-openrc
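For reference, admin-openrc is the credentials script created in the keystone chapter of this series; a minimal sketch of its typical contents (the password below is an assumption, substitute the values from your own deployment):
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin_pass
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
export OS_REGION_NAME=RegionTest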
1) Create the nova/placement users
# the service project was already created in the glance chapter;
# the nova/placement users are created in the "default" domain
[root@controller01 ~]# openstack user create --domain default --password=nova_pass nova
[root@controller01 ~]# openstack user create --domain default --password=placement_pass placement
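To confirm both users were created, list the users in the domain (optional check):
[root@controller01 ~]# openstack user list --domain default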
2) Grant roles to nova/placement
# grant the admin role to the nova/placement users
[root@controller01 ~]# openstack role add --project service --user nova admin
[root@controller01 ~]# openstack role add --project service --user placement admin
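"openstack role add" prints nothing on success; to verify the assignments took effect (optional check):
[root@controller01 ~]# openstack role assignment list --user nova --project service --names
[root@controller01 ~]# openstack role assignment list --user placement --project service --names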
3) Create the nova/placement service entities
# the nova service entity type is "compute";
# the placement service entity type is "placement"
[root@controller01 ~]# openstack service create --name nova --description "OpenStack Compute" compute
[root@controller01 ~]# openstack service create --name placement --description "Placement API" placement
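Both entities should now appear alongside the services registered in earlier chapters (optional check):
[root@controller01 ~]# openstack service list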
4) Create the nova/placement API endpoints
# note that --region must match the region generated when the admin user was initialized;
# the api addresses all use the vip; if public/internal/admin use separate vips, adjust accordingly;
# the nova-api service type is compute, the placement-api service type is placement;
# nova public api
[root@controller01 ~]# openstack endpoint create --region RegionTest compute public http://controller:8774/v2.1
# nova internal api
[root@controller01 ~]# openstack endpoint create --region RegionTest compute internal http://controller:8774/v2.1
# nova admin api
[root@controller01 ~]# openstack endpoint create --region RegionTest compute admin http://controller:8774/v2.1
# placement public api
[root@controller01 ~]# openstack endpoint create --region RegionTest placement public http://controller:8778
# placement internal api
[root@controller01 ~]# openstack endpoint create --region RegionTest placement internal http://controller:8778
# placement admin api
[root@controller01 ~]# openstack endpoint create --region RegionTest placement admin http://controller:8778
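With all six endpoints in place, you can filter the catalog by service to confirm them (optional check):
[root@controller01 ~]# openstack endpoint list --service compute
[root@controller01 ~]# openstack endpoint list --service placement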
3. Install nova
# Install the nova services on all controller nodes; controller01 is used as the example
[root@controller01 ~]# yum install openstack-nova-api openstack-nova-conductor \
openstack-nova-console openstack-nova-novncproxy \
openstack-nova-scheduler openstack-nova-placement-api -y
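A quick way to confirm the packages landed on each node (optional check):
[root@controller01 ~]# rpm -q openstack-nova-api openstack-nova-conductor openstack-nova-console \
openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api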
4. Configure nova.conf
# Run on all controller nodes; controller01 is used as the example;
# note the "my_ip" parameter, adjust it per node;
# note the ownership of nova.conf: root:nova
[root@controller01 ~]# cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
[root@controller01 ~]# egrep -v "^$|^#" /etc/nova/nova.conf
[DEFAULT]
my_ip=172.30.200.31
use_neutron=true
firewall_driver=nova.virt.firewall.NoopFirewallDriver
enabled_apis=osapi_compute,metadata
osapi_compute_listen=$my_ip
osapi_compute_listen_port=8774
metadata_listen=$my_ip
metadata_listen_port=8775
# with haproxy in front, services connecting to rabbitmq may hit connection timeouts and reconnects; check the logs of each service and of rabbitmq;
# transport_url=rabbit://openstack:rabbitmq_pass@controller:5673
# rabbitmq has its own clustering mechanism, and the official documentation recommends connecting to the rabbitmq cluster directly; with that approach, however, services occasionally report errors at startup for reasons unknown; if you do not see this problem, connecting directly to the rabbitmq cluster rather than through the haproxy front end is strongly recommended
transport_url=rabbit://openstack:rabbitmq_pass@controller01:5672,controller02:5672,controller03:5672
[api]
auth_strategy=keystone
[api_database]
connection=mysql+pymysql://nova:nova_dbpass@controller/nova_api
[barbican]
[cache]
backend=oslo_cache.memcache_pool
enabled=True
memcache_servers=controller01:11211,controller02:11211,controller03:11211
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
connection = mysql+pymysql://nova:nova_dbpass@controller/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://controller:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller01:11211,controller02:11211,controller03:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova_pass
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionTest
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:35357/v3
username = placement
password = placement_pass
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled=true
server_listen=$my_ip
server_proxyclient_address=$my_ip
novncproxy_base_url=http://$my_ip:6080/vnc_auto.html
novncproxy_host=$my_ip
novncproxy_port=6080
[workarounds]
[wsgi]
[xenserver]
[xvp]
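Since my_ip must differ per node, a sketch for adjusting it on the other controllers, assuming openstack-utils (the openstack-config wrapper around crudini) is installed; the address below is an assumed IP for controller02, substitute each node's own:
[root@controller02 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 172.30.200.32
[root@controller02 ~]# chown root:nova /etc/nova/nova.conf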
5. Configure 00-nova-placement-api.conf
# Run on all controller nodes; controller01 is used as the example;
# note that the listen address must be changed per node
[root@controller01 ~]# cp /etc/httpd/conf.d/00-nova-placement-api.conf /etc/httpd/conf.d/00-nova-placement-api.conf.bak
[root@controller01 ~]# sed -i "s/Listen\ 8778/Listen\ 172.30.200.31:8778/g" /etc/httpd/conf.d/00-nova-placement-api.conf
[root@controller01 ~]# sed -i "s/*:8778/172.30.200.31:8778/g" /etc/httpd/conf.d/00-nova-placement-api.conf
[root@controller01 ~]# echo "
#Placement API
<Directory /usr/bin>
  <IfVersion >= 2.4>
    Require all granted
  </IfVersion>
  <IfVersion < 2.4>
    Order allow,deny
    Allow from all
  </IfVersion>
</Directory>
" >> /etc/httpd/conf.d/00-nova-placement-api.conf
# restart httpd to bring up the placement-api listener
[root@controller01 ~]# systemctl restart httpd
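After the restart, the placement API should answer on the node's own address; a quick check (assumes the config above; the root URL returns a small JSON version document):
[root@controller01 ~]# curl http://172.30.200.31:8778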
6. Synchronize the nova databases
1) Synchronize the nova databases
# Run on any controller node;
# synchronize the nova-api database
[root@controller01 ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
# register the cell0 database
[root@controller01 ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
# create the cell1 cell
[root@controller01 ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# synchronize the nova database;
# "deprecated" messages can be ignored
[root@controller01 ~]# su -s /bin/sh -c "nova-manage db sync" nova
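Each sync should leave the schema at the latest migration; to see the applied version numbers (optional check):
[root@controller01 ~]# su -s /bin/sh -c "nova-manage api_db version" nova
[root@controller01 ~]# su -s /bin/sh -c "nova-manage db version" nova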
Note:
In this version, syncing the tables into the database reports the following error:
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332: NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported
exception.NotSupportedWarning
The solution is as follows:
Bug: https://bugs.launchpad.net/nova/+bug/1746530
Patch: https://github.com/openstack/oslo.db/commit/c432d9e93884d6962592f6d19aaec3f8f66ac3a2
2) Verify
# cell0 and cell1 should both be registered correctly
[root@controller01 ~]# nova-manage cell_v2 list_cells
# check the tables
[root@controller01 ~]# mysql -h controller01 -u nova -pnova_dbpass -e "use nova_api;show tables;"
[root@controller01 ~]# mysql -h controller01 -u nova -pnova_dbpass -e "use nova;show tables;"
[root@controller01 ~]# mysql -h controller01 -u nova -pnova_dbpass -e "use nova_cell0;show tables;"
7. Start the services
# Run on all controller nodes; controller01 is used as the example;
# enable at boot
[root@controller01 ~]# systemctl enable openstack-nova-api.service \
openstack-nova-consoleauth.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
# start
[root@controller01 ~]# systemctl restart openstack-nova-api.service
[root@controller01 ~]# systemctl restart openstack-nova-consoleauth.service
[root@controller01 ~]# systemctl restart openstack-nova-scheduler.service
[root@controller01 ~]# systemctl restart openstack-nova-conductor.service
[root@controller01 ~]# systemctl restart openstack-nova-novncproxy.service
# check status
[root@controller01 ~]# systemctl status openstack-nova-api.service \
openstack-nova-consoleauth.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
# check the listening ports
[root@controller01 ~]# netstat -tunlp | egrep '8774|8775|8778|6080'
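The same enable/restart sequence must also run on controller02 and controller03; a convenience sketch, assuming passwordless ssh between the controllers (an assumption, not part of the original steps):
[root@controller01 ~]# for node in controller02 controller03; do \
    ssh ${node} "systemctl enable openstack-nova-api openstack-nova-consoleauth \
        openstack-nova-scheduler openstack-nova-conductor openstack-nova-novncproxy && \
        systemctl restart openstack-nova-api openstack-nova-consoleauth \
        openstack-nova-scheduler openstack-nova-conductor openstack-nova-novncproxy"; done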
8. Verify
[root@controller01 ~]# . admin-openrc
# list the service components and check their state;
# the command "nova service-list" can be used as well
[root@controller01 ~]# openstack compute service list
# show the api endpoints
[root@controller01 ~]# openstack catalog list
# check that the cells and the placement api are working correctly
[root@controller01 ~]# nova-status upgrade check
9. Set up pcs resources
# Run on any controller node;
# add the resources openstack-nova-api, openstack-nova-consoleauth, openstack-nova-scheduler, openstack-nova-conductor and openstack-nova-novncproxy
[root@controller01 ~]# pcs resource create openstack-nova-api systemd:openstack-nova-api --clone interleave=true
[root@controller01 ~]# pcs resource create openstack-nova-consoleauth systemd:openstack-nova-consoleauth --clone interleave=true
[root@controller01 ~]# pcs resource create openstack-nova-scheduler systemd:openstack-nova-scheduler --clone interleave=true
[root@controller01 ~]# pcs resource create openstack-nova-conductor systemd:openstack-nova-conductor --clone interleave=true
[root@controller01 ~]# pcs resource create openstack-nova-novncproxy systemd:openstack-nova-novncproxy --clone interleave=true
# based on testing, the stateless services openstack-nova-api, openstack-nova-consoleauth, openstack-nova-conductor and openstack-nova-novncproxy are best run in active/active mode;
# services such as openstack-nova-scheduler are best run in active/passive mode
# list the pcs resources
[root@controller01 ~]# pcs resource
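If you follow the active/passive recommendation for openstack-nova-scheduler, a sketch of how the clone created above could be converted back to a single resource with pcs (an optional adjustment that deviates from the clone commands above):
[root@controller01 ~]# pcs resource unclone openstack-nova-scheduler
[root@controller01 ~]# pcs resource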