openstack-往已有集群中添加控制節點,實現控制節點的高可用


新添加的controller節點基礎環境准備

1、yum install centos-release-openstack-train.noarch -y  #安裝T版yum源

2、yum install python-openstackclient openstack-selinux -y  #安裝openstack客戶端命令和selinux

3、yum install python2-PyMySQL -y   #安裝py程序連接mysql所需要的模塊

4、yum install python-memcached -y  #安裝py程序連接memcache所需要的模塊

5、scp 172.31.7.101:/root/{admin-openrc.sh,demo-openrc.sh} /root/
#將controller1上admin和myuser用戶的環境變量拷貝到當前controller2節點上

controller-安裝keystone

1、yum install openstack-keystone httpd mod_wsgi -y  #安裝keystone服務

##到之前已經部署好的controller節點上,把已經部署好的keystone的配置文件目錄進行打包,拷貝到當前controller節點上
(1)、cd /etc/keystone/
(2)、tar czvf keystone-controller1.tar.gz ./*
(3)、scp keystone-controller1.tar.gz 172.31.7.102:/etc/keystone/

2、cd /etc/keystone/

3、tar xvf keystone-controller1.tar.gz

4、vim /etc/httpd/conf/httpd.conf
ServerName 172.31.7.102:80    #讓servername監聽本機地址(主站點)

5、vim /etc/hosts
172.31.7.248 openstack-vip.linux.local

6、ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

7、systemctl start httpd.service

8、systemctl enable httpd.service


##到controller2節點進行測試,關閉haproxy上controller1節點的5000端口,通過controller2節點5000端口進行測試
1、. admin-openrc.sh 

2、neutron agent-list   #能通過keystone認證后,從mysql中獲取到數據即配置正確

controller-安裝glance

1、yum install openstack-glance -y

##到之前已經部署好的controller節點上,把已經部署好的glance的配置文件目錄進行打包,拷貝到當前controller節點上
(1)、cd /etc/glance/
(2)、tar czvf glance-controller1.tar.gz ./*
(3)、scp glance-controller1.tar.gz 172.31.7.102:/etc/glance/

2、cd /etc/glance/

3、tar xvf glance-controller1.tar.gz

4、systemctl start openstack-glance-api.service

5、systemctl enable openstack-glance-api.service

6、vim /etc/fstab     #將NFS存放鏡像的目錄掛載到當前controller2節點存放鏡像的目錄下
172.31.7.105:/data/glance/ /var/lib/glance/images/ nfs defaults,_netdev 0 0

7、mount -a 
#注意 /var/lib/glance/images/ 目錄權限


##到controller2節點進行測試,關閉haproxy上controller1節點的9292端口,通過controller2節點9292端口進行測試
1、openstack image list   #通過keystone認證后,是否能夠獲取到鏡像

controller-安裝placement

1、yum install openstack-placement-api -y

##到之前已經部署好的controller節點上,把已經部署好的placement的配置文件目錄進行打包,拷貝到當前controller節點上
(1)、cd /etc/placement/
(2)、tar czvf placement-controller1.tar.gz ./*
(3)、scp placement-controller1.tar.gz 172.31.7.102:/etc/placement/

2、cd /etc/placement/

3、tar xvf placement-controller1.tar.gz

4、vim /etc/httpd/conf.d/00-placement-api.conf   #下面內容添加到配置文件的最后
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>

5、systemctl restart httpd


##到controller2節點進行測試,關閉haproxy上controller1節點的8778端口,通過controller2節點8778端口進行測試
1、placement-status upgrade check  #查看狀態是否是success

controller-安裝nova

1、yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y
  
##到之前已經部署好的controller節點上,把已經部署好的nova的配置文件目錄進行打包,拷貝到當前controller節點上
(1)、cd /etc/nova/
(2)、tar czvf nova-controller1.tar.gz ./*
(3)、scp nova-controller1.tar.gz 172.31.7.102:/etc/nova/

2、cd /etc/nova/

3、tar xvf nova-controller1.tar.gz

4、grep "172" ./* -R   #查看有哪些配置需要修改
./nova.conf:server_listen = 172.31.7.101
./nova.conf:server_proxyclient_address = 172.31.7.101

5、vim nova.conf
[vnc]
server_listen = 172.31.7.102   #指定vnc服務端監聽地址都為controller2本機地址
server_proxyclient_address = 172.31.7.102

6、systemctl start \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service

7、systemctl enable \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service
    
8、tail -f /var/log/nova/*.log  #日志中不能有任何報錯


##到controller2節點進行測試,關閉haproxy上controller1節點的8774和6080端口,通過controller2節點8774和6080端口進行測試
1、nova service-list  #列出nova的所有服務,並且狀態必須是UP

controller-安裝neutron

1、yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y

##到之前已經部署好的controller節點上,把已經部署好的neutron的配置文件目錄進行打包,拷貝到當前controller節點上
(1)、cd /etc/neutron/
(2)、tar czvf neutron-controller1.tar.gz ./*
(3)、scp neutron-controller1.tar.gz 172.31.7.102:/etc/neutron/

2、cd /etc/neutron/

3、tar xvf neutron-controller1.tar.gz

4、vim /etc/sysctl.conf    #添加內核參數(需在啟動neutron服務前先執行 sysctl -p 使其生效)
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

5、vim /usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
     metric = 100               #第400行
     #if 'metric' in gateway:   #注釋掉這兩行,否則brq網橋設備無法自動綁定eth0網卡
     #    metric = gateway['metric'] - 1
     
6、systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service

7、sysctl -p

8、systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service  

9、tail -f /var/log/neutron/*.log   #日志中不能有任何報錯


##到controller2節點進行測試,關閉haproxy上controller1節點的9696端口,通過controller2節點9696端口進行測試
1、neutron agent-list  #列出neutron的所有服務,並且狀態必須是true

controller-安裝dashboard

1、yum install openstack-dashboard -y

##到之前已經部署好的controller節點上,把已經部署好的dashboard的配置文件目錄進行打包,拷貝到當前controller節點上
(1)、cd /etc/openstack-dashboard/
(2)、tar zcvf openstack-dashboard-controller1.tar.gz ./*
(3)、scp openstack-dashboard-controller1.tar.gz 172.31.7.102:/etc/openstack-dashboard/

2、cd /etc/openstack-dashboard/

3、tar xvf openstack-dashboard-controller1.tar.gz

4、grep "172" ./* -R
./local_settings:ALLOWED_HOSTS = ['172.31.7.101', 'openstack-vip.linux.local']
./local_settings:OPENSTACK_HOST = "172.31.7.101"

5、vim local_settings
ALLOWED_HOSTS = ['172.31.7.102', 'openstack-vip.linux.local']
OPENSTACK_HOST = "172.31.7.102"

6、systemctl restart httpd

7、tail -f /var/log/httpd/*.log   #日志中不能有任何報錯


##到controller2節點進行測試,關閉haproxy上controller1節點的80端口,通過controller2節點80端口進行測試
1、http://172.31.7.102/dashboard   #瀏覽器訪問,賬號密碼都可以用admin或myuser


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM