參考文檔:
- Install-guide:https://docs.openstack.org/install-guide/
- OpenStack High Availability Guide:https://docs.openstack.org/ha-guide/index.html
- 理解Pacemaker:http://www.cnblogs.com/sammyliu/p/5025362.html
十五.Neutron計算節點
1. 安裝neutron-linuxbridge
# 在全部計算節點安裝neutron-linuxbridge服務,以compute01節點為例 [root@compute01 ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y
2. 配置neutron.conf
# 在全部計算節點操作,以compute01節點為例; # 注意”bind_host”參數,根據節點修改; # 注意neutron.conf文件的權限:root:neutron [root@compute01 ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak [root@compute01 ~]# egrep -v "^$|^#" /etc/neutron/neutron.conf [DEFAULT] state_path = /var/lib/neutron bind_host = 172.30.200.41 auth_strategy = keystone # 前端采用haproxy時,服務連接rabbitmq會出現連接超時重連的情況,可通過各服務與rabbitmq的日志查看; # transport_url = rabbit://openstack:rabbitmq_pass@controller:5673 # rabbitmq本身具備集群機制,官方文檔建議直接連接rabbitmq集群;但采用此方式時服務啟動有時會報錯,原因不明;如果沒有此現象,強烈建議連接rabbitmq直接對接集群而非通過前端haproxy transport_url=rabbit://openstack:rabbitmq_pass@controller01:5672,controller02:5672,controller03:5672 [agent] [cors] [database] [keystone_authtoken] www_authenticate_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller01:11211,controller02:11211,controller03:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = neutron_pass [matchmaker_redis] [nova] [oslo_concurrency] lock_path = $state_path/lock [oslo_messaging_amqp] [oslo_messaging_kafka] [oslo_messaging_notifications] [oslo_messaging_rabbit] [oslo_messaging_zmq] [oslo_middleware] [oslo_policy] [quotas] [ssl]
3. 配置linuxbridge_agent.ini
1)配置linuxbridge_agent.ini
# 在全部計算節點操作,以compute01節點為例; # linuxbridge_agent.ini文件的權限:root:neutron [root@compute01 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak [root@compute01 ~]# egrep -v "^$|^#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini [DEFAULT] [agent] [linux_bridge] # 網絡類型名稱與物理網卡對應,這里vlan租戶網絡對應規划的eth3; # 需要明確的是物理網卡是本地有效,需要根據主機實際使用的網卡名確定; # 另有”bridge_mappings”參數對應網橋 physical_interface_mappings = vlan:eth3 [network_log] [securitygroup] firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver enable_security_group = true [vxlan] enable_vxlan = true # tunnel租戶網絡(vxlan)vtep端點,這里對應規划的eth2(的地址),根據節點做相應修改 local_ip = 10.0.0.41 l2_population = true
2)配置內核參數
# bridge:是否允許橋接; # 如果“sysctl -p”加載不成功,報” No such file or directory”錯誤,需要加載內核模塊“br_netfilter”; # 命令“modinfo br_netfilter”查看內核模塊信息; # 命令“modprobe br_netfilter”加載內核模塊 [root@compute01 ~]# echo "# bridge" >> /etc/sysctl.conf [root@compute01 ~]# echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf [root@compute01 ~]# echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf [root@compute01 ~]# sysctl -p
4. 配置nova.conf
# 在全部計算節點操作,以compute01節點為例; # 配置只涉及nova.conf的”[neutron]”字段 [root@compute01 ~]# vim /etc/nova/nova.conf [neutron] url=http://controller:9696 auth_type=password auth_url=http://controller:35357 project_name=service project_domain_name=default username=neutron user_domain_name=default password=neutron_pass region_name=RegionTest
5. 啟動服務
# nova.conf文件已變更,首先需要重啟全部計算節點的nova服務 [root@compute01 ~]# systemctl restart openstack-nova-compute.service # 開機啟動 [root@compute01 ~]# systemctl enable neutron-linuxbridge-agent.service # 啟動 [root@compute01 ~]# systemctl restart neutron-linuxbridge-agent.service
6. 驗證
# 任意控制節點(或具備客戶端的節點)操作 [root@controller01 ~]# . admin-openrc # 查看neutron相關的agent; # 或:openstack network agent list --agent-type linux-bridge [root@controller01 ~]# openstack network agent list