參考文檔:
- Install-guide:https://docs.openstack.org/install-guide/
- OpenStack High Availability Guide:https://docs.openstack.org/ha-guide/index.html
- 理解Pacemaker:http://www.cnblogs.com/sammyliu/p/5025362.html
十一.Neutron控制/網絡節點集群
1. 創建neutron數據庫
# 在任意控制節點創建數據庫,后台數據自動同步,以controller01節點為例; [root@controller01 ~]# mysql -u root -pmysql_pass MariaDB [(none)]> CREATE DATABASE neutron; MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron_dbpass'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron_dbpass'; MariaDB [(none)]> flush privileges; MariaDB [(none)]> exit;
2. 創建neutron-api
# 在任意控制節點操作,以controller01節點為例; # 調用neutron服務需要認證信息,加載環境變量腳本即可 [root@controller01 ~]# . admin-openrc
1)創建neutron用戶
# service項目已在glance章節創建; # neutron用戶在”default” domain中 [root@controller01 ~]# openstack user create --domain default --password=neutron_pass neutron

2)neutron賦權
# 為neutron用戶賦予admin權限 [root@controller01 ~]# openstack role add --project service --user neutron admin
3)創建neutron服務實體
# neutron服務實體類型”network” [root@controller01 ~]# openstack service create --name neutron --description "OpenStack Networking" network

4)創建neutron-api
# 注意--region與初始化admin用戶時生成的region一致; # api地址統一采用vip,如果public/internal/admin分別使用不同的vip,請注意區分; # neutron-api 服務類型為network; # public api [root@controller01 ~]# openstack endpoint create --region RegionTest network public http://controller:9696

# internal api [root@controller01 ~]# openstack endpoint create --region RegionTest network internal http://controller:9696

# admin api [root@controller01 ~]# openstack endpoint create --region RegionTest network admin http://controller:9696

3. 安裝neutron
# 在全部控制節點安裝neutron相關服務,以controller01節點為例 [root@controller01 ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset -y
4. 配置neutron.conf
# 在全部控制節點操作,以controller01節點為例; # 注意”bind_host”參數,根據節點修改; # 注意neutron.conf文件的權限:root:neutron [root@controller01 ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak [root@controller01 ~]# egrep -v "^$|^#" /etc/neutron/neutron.conf [DEFAULT] bind_host = 172.30.200.31 auth_strategy = keystone core_plugin = ml2 service_plugins = router allow_overlapping_ips = True notify_nova_on_port_status_changes = true notify_nova_on_port_data_changes = true # l3高可用,可以采用vrrp模式或者dvr模式; # vrrp模式下,在各網絡節點(此處網絡節點與控制節點混合部署)以vrrp的模式設置主備virtual router;master故障時,virtual router不會遷移,而是將router對外服務的vip漂移到standby router上; # dvr模式下,三層的轉發(L3 Forwarding)與nat功能都會被分布到計算節點上,即計算節點也有了網絡節點的功能;但是,dvr依然不能消除集中式的virtual router,為了節省IPV4公網地址,仍將snat放在網絡節點上提供; # vrrp模式與dvr模式不可同時使用 # Neutron L3 Agent HA 之 虛擬路由冗余協議(VRRP): http://www.cnblogs.com/sammyliu/p/4692081.html # Neutron 分布式虛擬路由(Neutron Distributed Virtual Routing): http://www.cnblogs.com/sammyliu/p/4713562.html # “l3_ha = true“參數即啟用l3 ha功能 l3_ha = true # 最多在幾個l3 agent上創建ha router max_l3_agents_per_router = 3 # 可創建ha router的最少正常運行的l3 agent數量 min_l3_agents_per_router = 2 # vrrp廣播網絡 l3_ha_net_cidr = 169.254.192.0/18 # ”router_distributed “參數本身的含義是普通用戶創建路由器時,是否默認創建dvr;此參數默認值為“false”,這里采用vrrp模式,可注釋此參數 # 雖然此參數在mitaka(含)版本后,可與l3_ha參數同時打開,但設置dvr模式還同時需要設置網絡節點與計算節點的l3_agent.ini與ml2_conf.ini文件 # router_distributed = true # dhcp高可用,在3個網絡節點各生成1個dhcp服務器 dhcp_agents_per_network = 3 # 前端采用haproxy時,服務連接rabbitmq會出現連接超時重連的情況,可通過各服務與rabbitmq的日志查看; # transport_url = rabbit://openstack:rabbitmq_pass@controller:5673 # rabbitmq本身具備集群機制,官方文檔建議直接連接rabbitmq集群;但采用此方式時服務啟動有時會報錯,原因不明;如果沒有此現象,強烈建議連接rabbitmq直接對接集群而非通過前端haproxy transport_url=rabbit://openstack:rabbitmq_pass@controller01:5672,controller02:5672,controller03:5672 [agent] [cors] [database] connection = mysql+pymysql://neutron:neutron_dbpass@controller/neutron [keystone_authtoken] auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = 
controller01:11211,controller02:11211,controller03:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = neutron password = neutron_pass [matchmaker_redis] [nova] auth_url = http://controller:35357 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionTest project_name = service username = nova password = nova_pass [oslo_concurrency] lock_path = /var/lib/neutron/tmp [oslo_messaging_amqp] [oslo_messaging_kafka] [oslo_messaging_notifications] [oslo_messaging_rabbit] [oslo_messaging_zmq] [oslo_middleware] [oslo_policy] [quotas] [ssl]
5. 配置ml2_conf.ini
# 在全部控制節點操作,以controller01節點為例; # ml2_conf.ini文件的權限:root:neutron [root@controller01 ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak [root@controller01 ~]# egrep -v "^$|^#" /etc/neutron/plugins/ml2/ml2_conf.ini [DEFAULT] [l2pop] [ml2] type_drivers = flat,vlan,vxlan # ml2 mechanism_driver 列表,l2population對gre/vxlan租戶網絡有效 mechanism_drivers = linuxbridge,l2population # 可同時設置多種租戶網絡類型,第一個值是常規租戶創建網絡時的默認值,同時也默認是master router心跳信號的傳遞網絡類型 tenant_network_types = vlan,vxlan,flat extension_drivers = port_security [ml2_type_flat] # 指定flat網絡類型名稱為”external”,”*”表示任意網絡,空值表示禁用flat網絡 flat_networks = external [ml2_type_geneve] [ml2_type_gre] [ml2_type_vlan] # 指定vlan網絡類型的網絡名稱為”vlan”;如果不設置vlan id則表示不受限 network_vlan_ranges = vlan:3001:3500 [ml2_type_vxlan] vni_ranges = 10001:20000 [securitygroup] enable_ipset = true # 服務初始化調用ml2_conf.ini中的配置,但指向/etc/neutron/plugin.ini文件 [root@controller01 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
6. 配置linuxbridge_agent.ini
1)配置linuxbridge_agent.ini
# 在全部控制節點操作,以controller01節點為例; # linuxbridge_agent.ini文件的權限:root:neutron [root@controller01 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak [root@controller01 ~]# egrep -v "^$|^#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini [DEFAULT] [agent] [linux_bridge] # 網絡類型名稱與物理網卡對應,這里flat external網絡對應規划的eth1,vlan租戶網絡對應規划的eth3,在創建相應網絡時采用的是網絡名稱而非網卡名稱; # 需要明確的是物理網卡是本地有效,根據主機實際使用的網卡名確定; # 另有” bridge_mappings”參數對應網橋 physical_interface_mappings = external:eth1,vlan:eth3 [network_log] [securitygroup] firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver enable_security_group = true [vxlan] enable_vxlan = true # tunnel租戶網絡(vxlan)vtep端點,這里對應規划的eth2(的地址),根據節點做相應修改 local_ip = 10.0.0.31 l2_population = true
2)配置內核參數
# bridge:是否允許橋接; # 如果“sysctl -p”加載不成功,報” No such file or directory”錯誤,需要加載內核模塊“br_netfilter”; # 命令“modinfo br_netfilter”查看內核模塊信息; # 命令“modprobe br_netfilter”加載內核模塊 [root@controller01 ~]# echo "# bridge" >> /etc/sysctl.conf [root@controller01 ~]# echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf [root@controller01 ~]# echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf [root@controller01 ~]# sysctl -p
7. 配置l3_agent.ini(self-networking)
# 在全部控制節點操作,以controller01節點為例; # l3_agent.ini文件的權限:root:neutron [root@controller01 ~]# cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.bak [root@controller01 ~]# egrep -v "^$|^#" /etc/neutron/l3_agent.ini [DEFAULT] interface_driver = linuxbridge [agent] [ovs]
8. 配置dhcp_agent.ini
# 在全部控制節點操作,以controller01節點為例; # 使用dnsmasq提供dhcp服務; # dhcp_agent.ini文件的權限:root:neutron [root@controller01 ~]# cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak [root@controller01 ~]# egrep -v "^$|^#" /etc/neutron/dhcp_agent.ini [DEFAULT] interface_driver = linuxbridge dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq enable_isolated_metadata = true [agent] [ovs]
9. 配置metadata_agent.ini
# 在全部控制節點操作,以controller01節點為例; # metadata_proxy_shared_secret:與/etc/nova/nova.conf文件中參數一致; # metadata_agent.ini文件的權限:root:neutron [root@controller01 ~]# cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak [root@controller01 ~]# egrep -v "^$|^#" /etc/neutron/metadata_agent.ini [DEFAULT] nova_metadata_host = controller metadata_proxy_shared_secret = neutron_metadata_secret [agent] [cache]
10. 配置nova.conf
# 在全部控制節點操作,以controller01節點為例; # 配置只涉及nova.conf的”[neutron]”字段; # metadata_proxy_shared_secret:與/etc/neutron/metadata_agent.ini文件中參數一致 [root@controller01 ~]# vim /etc/nova/nova.conf [neutron] url = http://controller:9696 auth_url = http://controller:35357 auth_type = password project_domain_name = default user_domain_name = default region_name = RegionTest project_name = service username = neutron password = neutron_pass service_metadata_proxy = true metadata_proxy_shared_secret = neutron_metadata_secret
11. 同步neutron數據庫
# 任意控制節點操作; [root@controller01 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron # 驗證 [root@controller01 ~]# mysql -h controller01 -u neutron -pneutron_dbpass -e "use neutron;show tables;"
12. 啟動服務
# 全部控制節點操作; # 變更nova配置文件,首先需要重啟nova服務 [root@controller01 ~]# systemctl restart openstack-nova-api.service # 開機啟動 [root@controller01 ~]# systemctl enable neutron-server.service \ neutron-linuxbridge-agent.service \ neutron-l3-agent.service \ neutron-dhcp-agent.service \ neutron-metadata-agent.service # 啟動 [root@controller01 ~]# systemctl restart neutron-server.service [root@controller01 ~]# systemctl restart neutron-linuxbridge-agent.service [root@controller01 ~]# systemctl restart neutron-l3-agent.service [root@controller01 ~]# systemctl restart neutron-dhcp-agent.service [root@controller01 ~]# systemctl restart neutron-metadata-agent.service
13. 驗證
[root@controller01 ~]# . admin-openrc # 查看加載的擴展服務 [root@controller01 ~]# openstack extension list --network # 查看agent服務 [root@controller01 ~]# openstack network agent list

14. 設置pcs資源
# 在任意控制節點操作; # 添加資源neutron-server,neutron-linuxbridge-agent,neutron-l3-agent,neutron-dhcp-agent與neutron-metadata-agent [root@controller01 ~]# pcs resource create neutron-server systemd:neutron-server --clone interleave=true [root@controller01 ~]# pcs resource create neutron-linuxbridge-agent systemd:neutron-linuxbridge-agent --clone interleave=true [root@controller01 ~]# pcs resource create neutron-l3-agent systemd:neutron-l3-agent --clone interleave=true [root@controller01 ~]# pcs resource create neutron-dhcp-agent systemd:neutron-dhcp-agent --clone interleave=true [root@controller01 ~]# pcs resource create neutron-metadata-agent systemd:neutron-metadata-agent --clone interleave=true # 查看pcs資源 [root@controller01 ~]# pcs resource

