OpenStack High-Availability Cluster, Part 2: OpenStack Deployment (Nested + Open vSwitch) (OpenStack Pike on CentOS 7.6)


A walkthrough of OpenStack Pike on CentOS 7.6.
Lab resources are limited, so this experiment runs on a single physical host (CentOS 7.4) with nested virtualization enabled, using Open vSwitch as the underlying network layer.
 
 
 
 
Chapter 1: Creating the Lab VMs
 
On the physical lab host:
[root@bj_dfgc_gitserver_2 vm]# cat /sys/module/kvm_intel/parameters/nested
N
Nested virtualization is not enabled in KVM. Enable it via module options:
cat >/etc/modprobe.d/kvm-nested.conf<<EOF
options kvm-intel nested=1
options kvm-intel enable_shadow_vmcs=1
options kvm-intel enable_apicv=1
options kvm-intel ept=1
EOF
 
[root@bj_dfgc_gitserver_2 vm]# modprobe -r kvm_intel
[root@bj_dfgc_gitserver_2 vm]# lsmod | grep kvm
[root@bj_dfgc_gitserver_2 vm]# modprobe -a kvm_intel
[root@bj_dfgc_gitserver_2 vm]# lsmod | grep kvm
kvm_intel             170086  0
kvm                   566340  1 kvm_intel
irqbypass              13503  1 kvm
[root@bj_dfgc_gitserver_2 vm]# cat /sys/module/kvm_intel/parameters/nested
Y
 
OK, nested virtualization is now enabled in KVM.
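Once the guests are running you can confirm that nested virtualization actually reaches them; a quick check, run inside a guest (the VM XML below uses cpu mode host-passthrough, so the vmx flag should be visible):

grep -c vmx /proc/cpuinfo    # non-zero means the guest CPU exposes VT-x and can itself run KVM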
 
Installing Open vSwitch
The openvswitch package in the stock yum repositories seems to be broken: the service will not start after installation...
http://rpmfind.net/ indexes all kinds of RPM packages; search for an openvswitch RPM there, download it and install it locally, otherwise you would have to build from source...
 
yum install openvswitch-2.0.0-7.el7.x86_64.rpm -y
Start the openvswitch service and enable it at boot:
systemctl start openvswitch
systemctl enable openvswitch
 
Install and start the libvirtd service:
yum install qemu-kvm qemu-kvm-tools virt-manager libvirt virt-install -y
systemctl enable libvirtd
systemctl start libvirtd
 
Creating the VMs
 
Edit the VM XML definitions:
[root@bj_dfgc_gitserver_2 vm]# cat node1.xml
<domain type='kvm'>
  <name>node1</name>
  <memory unit='GiB'>24</memory>
  <currentMemory unit='GiB'>8</currentMemory>
  <vcpu placement='static' current='8'>24</vcpu>
  <os>
    <type arch='x86_64' machine='pc'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <cpu mode="host-passthrough"/>
  <clock offset='localtime'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='writeback'/>
      <source file='/data/vm/node1.qcow2'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='writeback'/>
      <source file='/data/vm/node1-data.qcow2'/>
      <target dev='vdb' bus='virtio'/>
    </disk>
    <controller type='pci' index='0' model='pci-root'>
      <alias name='pci.0'/>
    </controller>
    <interface type='bridge'>
      <model type='virtio'/>
      <source bridge='br_p4p1'/>
      <driver name='vhost'/>
    </interface>
    <interface type="bridge">
      <model type='virtio'/>
      <source bridge='opensw1'/>
      <vlan trunk='yes'>
        <tag id='99'/>
        <tag id='100'/>
        <tag id='101'/>
        <tag id='102'/>
      </vlan>
      <virtualport type='openvswitch'>
      </virtualport>
      <target dev='opensw1-1'/>
    </interface>
    <interface type="bridge">
      <model type='virtio'/>
      <source bridge='opensw2'/>
      <vlan trunk='yes'>
        <tag id='809'/>
        <tag id='810'/>
        <tag id='811'/>
        <tag id='812'/>
      </vlan>
      <virtualport type='openvswitch'>
      </virtualport>
      <target dev='opensw2-1'/>
    </interface>
    <serial type='pty'/>
    <input type='tablet' bus='usb'/>
    <graphics type='vnc' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
    <video>
      <model type='cirrus'/>
    </video>
    <memballoon model='virtio'>
      <stats period='10'/>
    </memballoon>
    <channel type='unix'>
      <source mode='bind' path='/var/lib/libvirt/qemu/node1.agent'/>
      <target type='virtio' name='org.qemu.guest_agent.0'/>
    </channel>
  </devices>
</domain>
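The two openvswitch interfaces above use libvirt's <vlan trunk='yes'> element: when the domain starts, libvirt creates the tap device (opensw1-1) and programs the OVS port itself. The manual equivalent, shown only to illustrate what libvirt does under the hood, would be roughly:

ovs-vsctl set port opensw1-1 trunks=99,100,101,102    # trunk the tap port for VLANs 99-102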
 
-------------------------------------------------------------
# cat node2.xml
<domain type='kvm'>
  <name>node2</name>
  <memory unit='GiB'>24</memory>
  <currentMemory unit='GiB'>16</currentMemory>
  <vcpu placement='static' current='8'>24</vcpu>
  <os>
    <type arch='x86_64' machine='pc'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <cpu mode="host-passthrough"/>
  <clock offset='localtime'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/libexec/qemu-kvm</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='writeback'/>
      <source file='/data/vm/node2.qcow2'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2' cache='writeback'/>
      <source file='/data/vm/node2-data.qcow2'/>
      <target dev='vdb' bus='virtio'/>
    </disk>
    <controller type='pci' index='0' model='pci-root'>
      <alias name='pci.0'/>
    </controller>
    <interface type='bridge'>
      <model type='virtio'/>
      <source bridge='br_p4p1'/>
      <driver name='vhost'/>
    </interface>
    <interface type='bridge'>
      <model type='virtio'/>
      <source bridge='br_p4p1'/>
      <driver name='vhost'/>
    </interface>
    <interface type="bridge">
      <model type='virtio'/>
      <source bridge='opensw1'/>
      <vlan trunk='yes'>
        <tag id='99'/>
        <tag id='100'/>
        <tag id='101'/>
        <tag id='102'/>
      </vlan>
      <virtualport type='openvswitch'>
      </virtualport>
      <target dev='opensw1-2'/>
    </interface>
    <interface type="bridge">
      <model type='virtio'/>
      <source bridge='opensw2'/>
      <vlan trunk='yes'>
        <tag id='809'/>
        <tag id='810'/>
        <tag id='811'/>
        <tag id='812'/>
      </vlan>
      <virtualport type='openvswitch'>
      </virtualport>
      <target dev='opensw2-2'/>
    </interface>
    <serial type='pty'/>
    <input type='tablet' bus='usb'/>
    <graphics type='vnc' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
    <video>
      <model type='cirrus'/>
    </video>
    <memballoon model='virtio'>
      <stats period='10'/>
    </memballoon>
    <channel type='unix'>
      <source mode='bind' path='/var/lib/libvirt/qemu/node2.agent'/>
      <target type='virtio' name='org.qemu.guest_agent.0'/>
    </channel>
  </devices>
</domain>
 
 
Reuse a previously prepared CentOS 7.6 image for both nodes:
[root@bj_dfgc_gitserver_2 vm]# cp centos7.1810.qcow2 node1.qcow2
[root@bj_dfgc_gitserver_2 vm]# cp centos7.1810.qcow2 node2.qcow2
 
Create the OVS bridges used by the lab, opensw1 and opensw2:
ovs-vsctl add-br opensw1
ovs-vsctl add-br opensw2
 
Create the data disks for node1 and node2 (node2 shown here; the matching node1 command appears after the output below):
[root@bj_dfgc_gitserver_2 vm]# qemu-img create -f qcow2 node2-data.qcow2 500G
Formatting 'node2-data.qcow2', fmt=qcow2 size=536870912000 encryption=off cluster_size=65536 lazy_refcounts=off
[root@bj_dfgc_gitserver_2 vm]# qemu-img info node2-data.qcow2
image: node2-data.qcow2
file format: qcow2
virtual size: 500G (536870912000 bytes)
disk size: 200K
cluster_size: 65536
Format specific information:
    compat: 1.1
    lazy refcounts: false
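node1 needs the same kind of data disk (node1.xml above references /data/vm/node1-data.qcow2); assuming the same 500G size:

qemu-img create -f qcow2 node1-data.qcow2 500G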
 
Define and start the two lab VMs:
 
[root@bj_dfgc_gitserver_2 vm]# virsh define node1.xml
[root@bj_dfgc_gitserver_2 vm]# virsh define node2.xml
[root@bj_dfgc_gitserver_2 vm]# virsh start node1
[root@bj_dfgc_gitserver_2 vm]# virsh start node2
 
Check the bridging state after startup:
# ovs-vsctl show
ed4b0290-d9c8-44a6-83cf-3093ce28c7cc
    Bridge "opensw1"
        Port "opensw1"
            Interface "opensw1"
                type: internal
        Port "opensw1-2"
            trunks: [99, 100, 101, 102]
            Interface "opensw1-2"
        Port "opensw1-1"
            trunks: [99, 100, 101, 102]
            Interface "opensw1-1"
    Bridge "opensw2"
        Port "opensw2-2"
            trunks: [809, 810, 811, 812]
            Interface "opensw2-2"
        Port "opensw2"
            Interface "opensw2"
                type: internal
        Port "opensw2-1"
            trunks: [809, 810, 811, 812]
            Interface "opensw2-1"
    ovs_version: "2.0.0"
 
 
[root@bj_dfgc_gitserver_2 vm]# brctl show
bridge name    bridge id        STP enabled    interfaces
br0        8000.000000000000    no        
br1        8000.000000000000    no        
br_p4p1        8000.2c534a0105e2    no        p4p1
                            vnet0
                            vnet1
                            vnet2
 
Physical NIC configuration:
[root@bj_dfgc_gitserver_2 vm]# cat /etc/sysconfig/network-scripts/ifcfg-p4p1
TYPE=Ethernet
BOOTPROTO=static
NAME=p4p1
DEVICE=p4p1
ONBOOT=yes
BRIDGE=br_p4p1
[root@bj_dfgc_gitserver_2 vm]# cat /etc/sysconfig/network-scripts/ifcfg-brp4p1
TYPE=Bridge
ONBOOT=yes
DEVICE=br_p4p1
BOOTPROTO=static
IPADDR=10.88.66.5
NETMASK=255.255.255.224
GATEWAY=10.88.66.1
DNS1=114.114.114.114
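After editing the ifcfg files, restart the network service so the bridge comes up with the address (note this briefly interrupts connectivity on p4p1):

systemctl restart network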
 
--------------------------------------------------------------------------------------------------------------------------
Now start the OpenStack installation inside the VMs.
Official installation guide (Chinese): https://docs.openstack.org/zh_CN/install-guide/
 
Chapter 2: Preparing the OpenStack Environment
[root@openstack1 ~]# cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
 
             eth0          eth1    eth2    role
openstack1   10.88.66.15   Trunk   Trunk   controller node
openstack2   10.88.66.16   Trunk   Trunk   compute node
 
Set the hostnames and the hosts file:
echo -e "10.88.66.15 openstack1 \n10.88.66.16 openstack2 " >> /etc/hosts
 
On the controller node:
hostnamectl set-hostname openstack1
On the compute node:
hostnamectl set-hostname openstack2
 
Installing the base packages

The base packages must be installed on every OpenStack node, both the controller and the compute nodes.
 
1. Install the EPEL repository (either of the following works):
yum install epel-release -y
yum install -y http://mirrors.aliyun.com/epel/epel-release-latest-7.noarch.rpm
 
2. Install the OpenStack repository

Since Pike, the release packages are available directly in the stock CentOS extras repository, so yum can install them without extra setup:
[root@openstack1 ~]# yum search openstack
Loaded plugins: fastestmirror
Determining fastest mirrors
* extras: mirror.jdcloud.com
* updates: mirror.jdcloud.com
=================================== N/S matched: openstack ====================================
ansible-openstack-modules.noarch : Unofficial Ansible modules for managing Openstack
centos-release-openstack-ocata.noarch : OpenStack from the CentOS Cloud SIG repo configs
centos-release-openstack-pike.x86_64 : OpenStack from the CentOS Cloud SIG repo configs
centos-release-openstack-queens.noarch : OpenStack from the CentOS Cloud SIG repo configs
centos-release-openstack-rocky.noarch : OpenStack from the CentOS Cloud SIG repo configs
diskimage-builder.noarch : Image building tools for OpenStack
golang-github-rackspace-gophercloud-devel.noarch : The Go SDK for Openstack
                                                 : http://gophercloud.io
php-opencloud.noarch : PHP SDK for OpenStack/Rackspace APIs
php-opencloud-doc.noarch : Documentation for PHP SDK for OpenStack/Rackspace APIs
python2-oslo-sphinx.noarch : OpenStack Sphinx Extensions and Theme for Python 2
 
  Name and summary matches only, use "search all" for everything.
 
[root@openstack1 ~]# yum install centos-release-openstack-pike -y
 
3. Install the OpenStack client
yum install -y python-openstackclient
4. Install the OpenStack SELinux management package
yum install -y openstack-selinux
 
5. Time synchronization
yum install -y ntp
systemctl enable ntpd
ntpdate  time.pool.aliyun.com && hwclock -w
timedatectl set-timezone Asia/Shanghai
echo '0 3 * * * root /usr/sbin/ntpdate   time.pool.aliyun.com  && hwclock -w >/dev/null 2>&1' >>/etc/crontab  
echo '0 2 * * * root timedatectl set-timezone Asia/Shanghai && ntpdate time1.aliyun.com && hwclock -w >/dev/null 2>&1' >>/etc/crontab
 
---------------------------------------------------------------------------------------------------------------
Deploying the MySQL database
 
Installing MySQL (MariaDB):
[root@openstack1 ~]# yum install -y mariadb mariadb-server python2-PyMySQL
 
Configuring the database
Official documentation for the SQL database installation on RHEL/CentOS: https://docs.openstack.org/install-guide/environment-sql-database-rdo.html
 
Create and edit /etc/my.cnf.d/openstack.cnf and complete the following actions:
In the [mysqld] section, set ``bind-address`` to the management IP of the controller node, so that the other nodes can reach the database over the management network:
[mysqld]
bind-address = 10.88.66.15
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
Finish the installation
Start the database service and configure it to start at boot:
[root@openstack1 ~]# systemctl enable mariadb.service
[root@openstack1 ~]# systemctl start mariadb.service
To secure the database service, run the ``mysql_secure_installation`` script; in particular, set a proper password for the database root user. (In a lab you can skip the password, which is more convenient.)
[root@openstack1 ~]# mysql_secure_installation
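As a quick sanity check before creating the service databases, confirm MariaDB is listening on the management address:

ss -lnt | grep 3306    # should show the configured bind-address, 10.88.66.15:3306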
 
#Glance database
mysql -u root -e "CREATE DATABASE glance;"
mysql -u root -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';"
mysql -u root -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';"
#Nova database
mysql -u root -e "CREATE DATABASE nova;"
mysql -u root -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';"
mysql -u root -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';"
 
mysql -u root -e "CREATE DATABASE nova_api; "
mysql -u root -e " GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'nova'; "
mysql -u root -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova';"
 
mysql -u root -e "CREATE DATABASE nova_cell0;"
mysql -u root -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost'  IDENTIFIED BY 'nova';"
mysql -u root -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%'  IDENTIFIED BY 'nova';"
 
mysql -u root -e "CREATE DATABASE placement;"
mysql -u root -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost'   IDENTIFIED BY 'placement';"
mysql -u root -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%'    IDENTIFIED BY 'placement';"
#Neutron database
mysql -u root -e "CREATE DATABASE neutron;"
mysql -u root -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';"
mysql -u root -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';"
#Cinder database
mysql -u root -e "CREATE DATABASE cinder;"
mysql -u root -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';"
mysql -u root -e "GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder';"
 
#Keystone database
mysql -u root -e "CREATE DATABASE keystone;"
mysql -u root -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';"
mysql -u root -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone';"
 
 
Note: since the Newton release, Nova has an extra nova_cell0 database.
 
Check that the databases and grants were created, using the keystone database as an example:
[root@openstack1 ~]# mysql -u root -e "SHOW GRANTS FOR keystone@'%';"
+---------------------------------------------------------------------------------------------------------+
| Grants for keystone@%                                                                                   |
+---------------------------------------------------------------------------------------------------------+
| GRANT USAGE ON *.* TO 'keystone'@'%' IDENTIFIED BY PASSWORD '*936E8F7AB2E21B47F6C9A7E5D9FE14DBA2255E5A' |
| GRANT ALL PRIVILEGES ON `keystone`.* TO 'keystone'@'%'                                                  |
+---------------------------------------------------------------------------------------------------------+
 
Note: if you want to log in to MySQL without typing the password each time, do the following:
[root@openstack1 ~]# vim .my.cnf
[client]
host=localhost
user='root'
password='YOURMYSQLPASSWORD'
 
Replace 'YOURMYSQLPASSWORD' with your own MySQL root password.
 
The RabbitMQ message broker

Official documentation for the message queue installation on RHEL/CentOS: https://docs.openstack.org/install-guide/environment-messaging-rdo.html
 
1. Install RabbitMQ
Install the package:
[root@openstack1 ~]# yum install -y rabbitmq-server
Start the message queue service and configure it to start with the system:
 
[root@openstack1 ~]# systemctl enable rabbitmq-server.service
[root@openstack1 ~]# systemctl start rabbitmq-server.service
Add the openstack user:
 
[root@openstack1 ~]# rabbitmqctl add_user openstack openstack
 
Note: when running this, make sure the hostname matches what /etc/hosts shows, otherwise the operation fails with an error.
Replace the password with a suitable one (the official docs call it RABBIT_DBPASS; openstack is used here).
 
Give the ``openstack`` user configure, write and read permissions:

[root@openstack1 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/" ...

List the RabbitMQ plugins:
[root@openstack1 ~]# rabbitmq-plugins list
Configured: E = explicitly enabled; e = implicitly enabled
| Status:   * = running on rabbit@openstack1
|/
[  ] amqp_client                       3.6.5
[  ] cowboy                            1.0.3
[  ] cowlib                            1.0.1
[  ] mochiweb                          2.13.1
[  ] rabbitmq_amqp1_0                  3.6.5
[  ] rabbitmq_auth_backend_ldap        3.6.5
[  ] rabbitmq_auth_mechanism_ssl       3.6.5
[  ] rabbitmq_consistent_hash_exchange 3.6.5
[  ] rabbitmq_event_exchange           3.6.5
[  ] rabbitmq_federation               3.6.5
[  ] rabbitmq_federation_management    3.6.5
[  ] rabbitmq_jms_topic_exchange       3.6.5
[  ] rabbitmq_management               3.6.5
[  ] rabbitmq_management_agent         3.6.5
[  ] rabbitmq_management_visualiser    3.6.5
[  ] rabbitmq_mqtt                     3.6.5
[  ] rabbitmq_recent_history_exchange  1.2.1
[  ] rabbitmq_sharding                 0.1.0
[  ] rabbitmq_shovel                   3.6.5
[  ] rabbitmq_shovel_management        3.6.5
[  ] rabbitmq_stomp                    3.6.5
[  ] rabbitmq_top                      3.6.5
[  ] rabbitmq_tracing                  3.6.5
[  ] rabbitmq_trust_store              3.6.5
[  ] rabbitmq_web_dispatch             3.6.5
[  ] rabbitmq_web_stomp                3.6.5
[  ] rabbitmq_web_stomp_examples       3.6.5
[  ] sockjs                            0.3.4
[  ] webmachine                        1.10.3
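To double-check the openstack account created above, list the users and their permissions on the default vhost (a quick verification):

rabbitmqctl list_users
rabbitmqctl list_permissions -p /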
 
---------------------------------------------------------------------------------------
 
Enable the RabbitMQ management plugin (it will load on every start):
[root@openstack1 ~]# rabbitmq-plugins enable rabbitmq_management
---------------------------------------------------------------------------------------
Restart RabbitMQ:
[root@openstack1 ~]# systemctl restart rabbitmq-server.service
---------------------------------------------------------------------------------------
Check the listening ports again; the web management port is 15672:
[root@openstack1 ~]# netstat -lntup
tcp        0      0 0.0.0.0:15672           0.0.0.0:*               LISTEN      11412/beam
---------------------------------------------------------------------------------------
 
Open 10.88.66.15:15672 in a browser; user name guest, password guest.
After logging in:
Admin -------> copy "administrator" -------> click the openstack user ------> Update this user -------->
Tags: paste "administrator" ---------> set both password fields to openstack --------> logout
Then log in again: user name openstack, password openstack
 
Installing Memcached
The Identity service authentication mechanism uses Memcached to cache tokens. The memcached service usually runs on the controller node. For production deployments, we recommend a combination of firewalling, authentication and encryption to secure it.
Install and configure the components
1. Install the packages:
[root@openstack1 ~]# yum install -y memcached python-memcached
2. Edit /etc/sysconfig/memcached and complete the following:
    * Configure the service to use the controller node's management IP address, to enable access by other nodes via the management network:
OPTIONS="-l 10.88.66.15,::1"
 
Finish the installation
* Start the Memcached service and configure it to start at system boot:
# systemctl enable memcached.service
# systemctl start memcached.service
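A quick check that memcached is listening on the management address:

ss -lnt | grep 11211    # should show 10.88.66.15:11211 (and ::1:11211)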
 
Chapter 3: The OpenStack Identity Service, Keystone
---------------------------------------------------------------------------------------
What Keystone does: users and authentication (user permissions and user activity tracking);
              service catalog: provides a catalog of all services and the endpoints of their APIs.
Terms: User, Tenant (project), Token, Role, Service, Endpoint
----------------------------------------------------------------------------------------
1. Install keystone
[root@openstack1 ~]# yum install -y openstack-keystone httpd mod_wsgi
 
[root@openstack1 ~]# openssl rand -hex 10        ---- generate a random token
dc46816a3e103ec2a700
 
Edit /etc/keystone/keystone.conf and complete the following actions:

In the ``[DEFAULT]`` section, define the value of the initial admin token:

[DEFAULT]
...
admin_token = ADMIN_TOKEN
Replace ``ADMIN_TOKEN`` with the random value generated in the previous step.
 
In the [database] section, configure database access:

[database]
...
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
Replace ``KEYSTONE_DBPASS`` with the password you chose for the database (keystone in this guide), and controller with the management IP, 10.88.66.15.
 
In the ``[token]`` section, configure the Fernet token provider:

[token]
...
provider = fernet
Then initialize the Identity service database (the db_sync step below).
 
 
 
The finished /etc/keystone/keystone.conf:
[root@openstack1 ~]# grep -vn '^$\|^#'  /etc/keystone/keystone.conf  
[DEFAULT]
admin_token = dc46816a3e103ec2a700
[assignment]
[auth]
[cache]
[catalog]
[cors]
[cors.subdomain]
[credential]
[database]
connection = mysql+pymysql://keystone:keystone@10.88.66.15/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[federation]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[kvs]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
[policy]
[profiler]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[signing]
[token]
provider = fernet
[tokenless_auth]
[trust]
 
 
 
 
-----------------------------------------------------------------------------------------------
 
Sync the database. Note the permissions: use su -s to run it as the keystone user:
 
[root@openstack1 ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
[root@openstack1 ~]# chown -R keystone:keystone /var/log/keystone/keystone.log  (optional)
[root@openstack1 keystone]# mysql  -ukeystone -pkeystone keystone -e "use keystone;show tables;"
+------------------------+
| Tables_in_keystone     |
+------------------------+
| access_token           |
| assignment             |
| config_register        |
| consumer               |
| credential             |
| endpoint               |
| endpoint_group         |
| federated_user         |
| federation_protocol    |
| group                  |
| id_mapping             |
| identity_provider      |
| idp_remote_ids         |
| implied_role           |
| local_user             |
| mapping                |
| migrate_version        |
| nonlocal_user          |
| password               |
| policy                 |
| policy_association     |
| project                |
| project_endpoint       |
| project_endpoint_group |
| region                 |
| request_token          |
| revocation_event       |
| role                   |
| sensitive_config       |
| service                |
| service_provider       |
| token                  |
| trust                  |
| trust_role             |
| user                   |
| user_group_membership  |
| user_option            |
| whitelisted_config     |
+------------------------+
All tables created, OK.
 
Initialize the Fernet keys:
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
 
If the fernet-keys and credential-keys directories below exist, the initialization commands completed successfully:
[root@openstack1 ~]# ls -lh /etc/keystone/
total 136K
drwx------. 2 keystone keystone   24 Feb 28 14:16 credential-keys
-rw-r-----. 1 root     keystone 2.3K Nov  1 06:24 default_catalog.templates
drwx------. 2 keystone keystone   24 Feb 28 14:16 fernet-keys
-rw-r-----. 1 root     keystone 114K Feb 28 14:14 keystone.conf
-rw-r-----. 1 root     keystone 2.5K Nov  1 06:24 keystone-paste.ini
-rw-r-----. 1 root     keystone 1.1K Nov  1 06:24 logging.conf
-rw-r-----. 1 root     keystone    3 Nov  1 17:21 policy.json
-rw-r-----. 1 keystone keystone  665 Nov  1 06:24 sso_callback_template.html
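In production the Fernet keys should be rotated periodically; keystone-manage provides this (shown for reference, e.g. run from cron):

keystone-manage fernet_rotate --keystone-user keystone --keystone-group keystone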
 
 
 
 
 
----------------------------------------------------------------------------------
 
Configure the Apache HTTP server
 
Edit ``/etc/httpd/conf/httpd.conf`` and set the ``ServerName`` option for the controller node:
Listen 0.0.0.0:80
ServerName localhost:80
 
httpd's ServerName must be configured, otherwise the keystone service will not start.
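For example, a simple append works on the stock httpd.conf (where ServerName exists only as a comment):

echo 'ServerName localhost:80' >> /etc/httpd/conf/httpd.conf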
 
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
 
The following is the content of /etc/httpd/conf.d/wsgi-keystone.conf; Apache fronts Keystone on two ports: 5000 for normal API access, 35357 for admin access.
[root@openstack1 ~]# vim /etc/httpd/conf.d/wsgi-keystone.conf
[root@openstack1 ~]# cat /etc/httpd/conf.d/wsgi-keystone.conf
Listen 0.0.0.0:5000
Listen 0.0.0.0:35357
 
<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    LimitRequestBody 114688
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone.log
    CustomLog /var/log/httpd/keystone_access.log combined
 
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
 
<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    LimitRequestBody 114688
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone.log
    CustomLog /var/log/httpd/keystone_access.log combined
 
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
 
Alias /identity /usr/bin/keystone-wsgi-public
<Location /identity>
    SetHandler wsgi-script
    Options +ExecCGI
 
    WSGIProcessGroup keystone-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
</Location>
 
Alias /identity_admin /usr/bin/keystone-wsgi-admin
<Location /identity_admin>
    SetHandler wsgi-script
    Options +ExecCGI
 
    WSGIProcessGroup keystone-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
</Location>
 
 
Ha, quite a bit more content here than in the Newton version.
 
 
---------------------------------------------------------------------------------------------------
Start the Apache HTTP service and configure it to start with the system:
[root@openstack1 ~]# systemctl enable httpd
[root@openstack1 ~]# systemctl start httpd
---------------------------------------------------------------------------------------------------
Check the ports:
[root@openstack1 ~]# netstat -lntup|grep httpd
tcp        0      0 0.0.0.0:35357           0.0.0.0:*               LISTEN      16721/http         
tcp        0      0 0.0.0.0:5000            0.0.0.0:*               LISTEN      16721/http         
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      16721/http
 
 
 
Check the log /var/log/keystone/keystone.log.
No ERROR entries means keystone started correctly:
[root@openstack1 ~]# tail /var/log/keystone/keystone.log
2018-03-05 09:54:52.488 91245 INFO migrate.versioning.api [-] done
2018-03-05 09:54:52.488 91245 INFO migrate.versioning.api [-] 3 -> 4...
2018-03-05 09:54:52.626 91245 INFO migrate.versioning.api [-] done
2018-03-05 09:55:23.417 91267 INFO keystone.common.fernet_utils [-] key_repository does not appear to exist; attempting to create it
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Created a new key: /etc/keystone/fernet-keys/0
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Starting key rotation with 1 key files: ['/etc/keystone/fernet-keys/0']
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Current primary key is: 0
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Next primary key will be: 1
2018-03-05 09:55:23.418 91267 INFO keystone.common.fernet_utils [-] Promoted key 0 to be the primary: 1
2018-03-05 09:55:23.419 91267 INFO keystone.common.fernet_utils [-] Created a new key: /etc/keystone/fernet-keys/0
---------------------------------------------------------------------------------------------------
 
Set up the admin token and endpoint environment variables for bootstrapping the users and endpoints:
[root@openstack1 ~]# grep -n '^admin_token' /etc/keystone/keystone.conf
18:admin_token = dc46816a3e103ec2a700
 
[root@openstack1 ~]# export OS_TOKEN=dc46816a3e103ec2a700    ------- set the environment variables
[root@openstack1 ~]# export OS_URL=http://10.88.66.15:35357/v3
[root@openstack1 ~]# export OS_IDENTITY_API_VERSION=3
 
Create domains, projects, users and roles

The Identity service provides authentication for every OpenStack service, using a combination of domains, projects (tenants), users and roles.

Create the ``default`` domain:
openstack domain create --description "Default Domain" default
 
For administrative operations in your environment, create an administrative project, user and role:

Create the admin project:

openstack project create --domain default --description "Admin Project" admin

Note

OpenStack generates IDs dynamically, so your output will differ from the sample output in this guide.
 
Create the admin user:
 
openstack user create --domain default --password-prompt admin
 
Create the admin role:
 
openstack role create admin
 
Add the ``admin`` role to the admin project and user:
 
[root@openstack1 ~]# openstack role add --project admin --user admin admin
Note

This command produces no output.
Note
Any role you create must be mapped in the ``policy.json`` file under each OpenStack service's configuration directory (/etc/keystone/ here). The default policy grants the "admin" role administrative access to most services. For more information, see the Operations Guide - Managing Projects and Users <https://docs.openstack.org/ops-guide/ops-projects-users.html>.

Tip: it is best to delete any admin user that was already added at registration time, because you do not know its password...
 
 
Create the ``demo`` project:
 
openstack project create --domain default --description "Demo Project" demo
Note

Do not repeat this step when creating additional users for this project.
 
Create the ``demo`` user:
 
openstack user create --domain default --password-prompt demo
 
Create the user role:
 
openstack role create user
 
Add the user role to the demo project and user:
 
openstack role add --project demo --user demo user
 
 
This guide uses a service project that contains a unique user for each service you add to your environment. Create the ``service`` project:
openstack project create --domain default --description "Service Project" service
 
Quick copy-paste command list (a non-interactive variant follows the block):
export OS_TOKEN=dc46816a3e103ec2a700
export OS_URL=http://10.88.66.15:35357/v3
export OS_IDENTITY_API_VERSION=3
openstack domain create --description "Default Domain" default
openstack project create --domain default --description "Admin Project" admin
openstack user create --domain default --password-prompt admin
openstack role create admin
openstack role add --project admin --user admin admin
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password-prompt demo
openstack role create user
openstack role add --project demo --user demo user
openstack project create --domain default --description "Service Project" service
--------------------------------------------------------------------------------------------------
 
View the users and roles just created:
 
[root@openstack1 ~]# openstack user list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| 6457b53386544638b44af8217d531f3f | demo  |
| a178e2b2c5234e2389c574b3474b5cc2 | admin |
+----------------------------------+-------+
[root@openstack1 ~]# openstack role list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| e2d5ea992b774b788504f3e6ec437fed | user  |
| eff56f6f078f41439f9c60a32a5cc411 | admin |
+----------------------------------+-------+
[root@openstack1 ~]# openstack project list
+----------------------------------+---------+
| ID                               | Name    |
+----------------------------------+---------+
| 63c27b21277a46dc90cf9bc50521f511 | demo    |
| bf40e05ce8c042a4ae0caa3dfd53f758 | service |
| e1f3d0d070534e14b2a538979b955bb7 | admin   |
+----------------------------------+---------+
 
-------------------------------------------------------------------------------------------------
 
Create the glance user:
 
openstack user create --domain default --password=glance glance
Add this user to the service project and grant it the admin role:
openstack role add --project service --user glance admin
 
Create the nova user:
openstack user create --domain default --password=nova nova
openstack role add --project service --user nova admin
 
Create the placement user (for Nova's Placement API):
openstack user create --domain default --password=placement placement
openstack role add --project service --user placement admin
 
Create the neutron user:
openstack user create --domain default --password=neutron neutron
openstack role add --project service --user neutron admin
 
 
 
Bootstrap the Identity service:
keystone-manage bootstrap --bootstrap-password admin --bootstrap-admin-url http://10.88.66.15:35357/v3/ --bootstrap-internal-url http://10.88.66.15:5000/v3/ --bootstrap-public-url http://10.88.66.15:5000/v3/ --bootstrap-region-id RegionOne
 
Create the service entity and API endpoints
In your OpenStack environment, the Identity service manages a catalog of services. Services use this catalog to determine which services are available in your environment.

Create the service entity for the Identity service:
 
[root@openstack1 ~]# openstack service create --name keystone --description "OpenStack Identity" identity
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Identity               |
| enabled     | True                             |
| id          | bf5c6f371d3541a083a8bc7a4f4a91a5 |
| name        | keystone                         |
| type        | identity                         |
+-------------+----------------------------------+
Note

OpenStack generates IDs dynamically, so your output will differ from the sample shown.

The Identity service also manages a catalog of API endpoints for your environment. Services use this catalog to determine how to communicate with the other services in your environment.

OpenStack uses three API endpoint variants for each service: admin, internal and public. By default, the admin API endpoint allows modifying users and tenants, while the public and internal APIs do not. In a production environment the variants may reside on separate networks serving different types of users, for security reasons. For instance, the public API network may be visible from the Internet so customers can manage their own clouds; the admin API network may be restricted to the organization's operators who manage the cloud infrastructure; and the internal API network may be restricted to the hosts that run OpenStack services. OpenStack also supports multiple regions for scalability. For simplicity, this guide uses the management network for all endpoint variants and the default ``RegionOne`` region.
 
Create the Identity service API endpoints:
 
openstack endpoint create --region RegionOne identity public http://10.88.66.15:5000/v3
openstack endpoint create --region RegionOne identity internal http://10.88.66.15:5000/v3
openstack endpoint create --region RegionOne identity admin http://10.88.66.15:35357/v3
 
Verify operation

Verify operation of the Identity service before installing the other services.
Note
Run these commands on the controller node.
 
[root@openstack1 ~]# unset OS_TOKEN OS_URL
[root@openstack1 ~]# openstack --os-auth-url http://10.88.66.15:35357/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
Password:
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2018-03-04 09:08:28+00:00                                                                                                                                                               |
| id         | gAAAAABam6l8hb5pRSRCSHlQEXvcGHNT1OeSB4U2ggAsDnDYSm0audYmYpw-A_HxkA4udKZkC7q1pnomxRw7Eim2EAkLlicORtORw_OwdrSoEhgLKehUv-8l-x28vP1zFUHA8eB1EbJXCZuaqZ8HtdbgwE005jGryF1H9jqRTzOSs9GFhUPgg |
| project_id | e1f3d0d070534e14b2a538979b955bb7                                                                                                                                                        |
| user_id    | a178e2b2c5234e2389c574b3474b5cc2                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
 
Reaching this point means keystone is working.
[root@openstack1 ~]# openstack --os-auth-url http://10.88.66.15:35357/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name demo --os-username demo token issue 
Password:
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2018-03-04 09:10:43+00:00                                                                                                                                                               |
| id         | gAAAAABam6oDRhGnlNikctPM2eeu-jZ7DkYRvl_KVD0AKww5yAjIb1soyORUxZ4Ga1V6N-Jl-CGttoNFNBO0Q7vtpIX9pTALXG7wbGjfVNIuWdnNblqf9pDNaPrQ97BS2TPIRw8lBEwsEynDi_j_3ogd0Uu0W5vhgaIGopj65R9h5t76QqiGRl8 |
| project_id | 63c27b21277a46dc90cf9bc50521f511                                                                                                                                                        |
| user_id    | d53e2219188c4438b27f055d8d5cacc8                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
 
 
Create OpenStack client environment scripts
Create client environment variable scripts for the admin and ``demo`` projects and users:
[root@openstack1 ~]# vim admin-openstack.sh
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://10.88.66.15:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
 
 
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack token issue
+------------+------------------------------------------------------------------------------------------+
| Field      | Value                                                                                    |
+------------+------------------------------------------------------------------------------------------+
| expires    | 2018-03-04 09:15:04+00:00                                                                |
| id         | gAAAAABam6sIQEo1XKq9ggR6wujmNy55yzjn1B8We9e_CQmfiyiFegTgSiVIpst45QmpAZEiRKk6CzltBKMP5xBi |
|            | AHfCUobdArDXYUoOTU_jJAvdhGZ05Ae8qD2Fny1Bh4eSRKgDpWMPJY_FzBNSGtIwGO2LQ42rXbw0TykvOQyoZGJM |
|            | X9QGmko                                                                                  |
| project_id | e1f3d0d070534e14b2a538979b955bb7                                                         |
| user_id    | a178e2b2c5234e2389c574b3474b5cc2                                                         |
+------------+------------------------------------------------------------------------------------------+
 
 
[root@openstack1 ~]# vim demo-openstack.sh           
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://10.88.66.15:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
 
[root@openstack1 ~]# source demo-openstack.sh     
[root@openstack1 ~]# openstack token issue   
+------------+------------------------------------------------------------------------------------------+
| Field      | Value                                                                                    |
+------------+------------------------------------------------------------------------------------------+
| expires    | 2018-03-04 09:17:29+00:00                                                                |
| id         | gAAAAABam6uZwCBZV717q4oFuTSJoZ_lFtBz2AS351y7UUbSfFR4FNnwfZl_xkKgvDE2HLyq4NTiPIbFziAaDWya |
|            | Tpm1p2U_yPfY8X1CI8CJ2lYV2qdMSFjEnHWjB4lNNeKTU4xocCRLGZCMFqOZX5eYjxXavbyATMyJnIfYFC1AD9fR |
|            | gWTUz4k                                                                                  |
| project_id | 63c27b21277a46dc90cf9bc50521f511                                                         |
| user_id    | d53e2219188c4438b27f055d8d5cacc8                                                         |
+------------+------------------------------------------------------------------------------------------+
 
 
Chapter 4: The OpenStack Image Service, Glance

Glance consists of three main parts: glance-api, glance-registry, and the image store.
glance-api: accepts requests to create, delete, and read cloud images
glance-registry: the image registration service of the cloud
 
1. Prerequisites
Create the glance service:
source admin-openstack.sh
openstack service create --name glance --description "OpenStack Image service" image
 
Create the Image service API endpoints:
openstack endpoint create --region RegionOne   image public http://10.88.66.15:9292
openstack endpoint create --region RegionOne   image internal http://10.88.66.15:9292
openstack endpoint create --region RegionOne   image admin http://10.88.66.15:9292
 
[root@openstack1 ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                          |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| 52912cb0e8db41b181b7ba219651dc87 | RegionOne | glance       | image        | True    | internal  | http://10.88.66.15:9292      |
| 645ce0753cda48dd8747f2db25b5fb26 | RegionOne | glance       | image        | True    | admin     | http://10.88.66.15:9292      |
| 790c71a14102407ca6b441853be6cf5b | RegionOne | glance       | image        | True    | public    | http://10.88.66.15:9292      |
| 84dda84836094b2494c3478406756373 | RegionOne | keystone     | identity     | True    | internal  | http://10.88.66.15:5000/v3/  |
| 9050f9fec5b1484684380e5adb32746f | RegionOne | keystone     | identity     | True    | admin     | http://10.88.66.15:35357/v3/ |
| de71ff11b10d4ab094d44dc58af7c68e | RegionOne | keystone     | identity     | True    | public    | http://10.88.66.15:5000/v3/  |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
 
 
2. Install and configure the components
Install glance:
 
[root@openstack1 ~]# yum install -y openstack-glance python-glance python-glanceclient
 
 
Edit /etc/glance/glance-api.conf and /etc/glance/glance-registry.conf and complete the following actions:
In the [database] section, configure database access:
[database]
...
connection = mysql+pymysql://glance:glance@10.88.66.15/glance
 
 
Sync the database:
 
[root@openstack1 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
 
Check that the database was synced:
 
[root@openstack1 ~]# mysql -uglance -pglance  -e "use glance;show tables;"
+----------------------------------+
| Tables_in_glance                 |
+----------------------------------+
| alembic_version                  |
| image_locations                  |
| image_members                    |
| image_properties                 |
| image_tags                       |
| images                           |
| metadef_namespace_resource_types |
| metadef_namespaces               |
| metadef_objects                  |
| metadef_properties               |
| metadef_resource_types           |
| metadef_tags                     |
| migrate_version                  |
| task_info                        |
| tasks                            |
+----------------------------------+
 
 
 
Configure the keystone integration in glance-api.conf:

Edit the [keystone_authtoken] and [paste_deploy] sections of /etc/glance/glance-api.conf to configure Identity service access:
 
 
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
 
[paste_deploy]
flavor = keystone
 
Note: the Keystone auth API version was bumped after the Newton release; raise the version in the corresponding settings accordingly, otherwise `openstack image list` fails with HTTP 500. The same applies to every keystone_authtoken configuration later in this guide, but the warning will not be repeated.
Example of the failure:
[root@openstack1 ~]# openstack image list
Internal Server Error (HTTP 500)
 
In the [glance_store] section, configure the local filesystem store and the image file location:
 
[glance_store]
...
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
 
Tip: using NFS as the image store
You can mount an NFS export directly at the image directory (a persistent fstab variant follows the command):
mount -t nfs 10.30.1.203:/data /var/lib/glance/images
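To make the mount survive reboots, an /etc/fstab entry along these lines would work (assuming the same export):

10.30.1.203:/data  /var/lib/glance/images  nfs  defaults,_netdev  0 0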
 
Configure the keystone integration in glance-registry.conf:
Edit the [keystone_authtoken] and [paste_deploy] sections of /etc/glance/glance-registry.conf to configure Identity service access:
 
[keystone_authtoken]
...
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
 
[paste_deploy]
...
flavor = keystone
 
 
Check that /etc/glance/glance-api.conf and /etc/glance/glance-registry.conf match the following:
 
# grep -v '^#\|^$' /etc/glance/glance-api.conf
[DEFAULT]
[cors]
[cors.subdomain]
[database]
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images
[image_format]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
 
 
# grep -v '^#\|^$' /etc/glance/glance-registry.conf
[DEFAULT]
[database]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[matchmaker_redis]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
 
-------------------------------------------------------------------------------------------
 
Start the glance services and enable them at boot:
systemctl enable openstack-glance-api
systemctl enable openstack-glance-registry 
systemctl start openstack-glance-api 
systemctl start openstack-glance-registry
 
 
-------------------------------------------------------------------------------------------
 
Listening ports: registry 9191, api 9292
 
[root@openstack1 ~]# netstat -antup
tcp        0      0 0.0.0.0:9292            0.0.0.0:*               LISTEN      10845/python2              
tcp        0      0 0.0.0.0:9191            0.0.0.0:*               LISTEN      10864/python2 
-------------------------------------------------------------------------------------------
 
 
[root@openstack1 ~]#  glance image-list
+----+------+
| ID | Name |
+----+------+
+----+------+
 
If `glance image-list` produces the output above, glance was installed successfully.
 
 
Note: if you get the error shown below, the cause is usually a wrong auth_uri or auth_url setting in /etc/glance/glance-api.conf or /etc/glance/glance-registry.conf. Before the Ocata release auth_uri=http://10.88.66.15:5000; from Ocata on it is http://10.88.66.15:5000/v3.
 
[root@openstack1 ~]# openstack image list
Internal Server Error (HTTP 500)
 
 
Tip:
glance image-list and openstack image list have the same effect.
 
 
---------------------------------------------------------------------------------------------------
 
Verifying glance

Download the source image:
 
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
openstack image create "cirros3.5"  --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
+------------------+------------------------------------------------------+
| Field            | Value                                                |
+------------------+------------------------------------------------------+
| checksum         | f8ab98ff5e73ebab884d80c9dc9c7290                     |
| container_format | bare                                                 |
| created_at       | 2019-02-18T02:48:39Z                                 |
| disk_format      | qcow2                                                |
| file             | /v2/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47/file |
| id               | 0b105463-a9a2-4f89-a4d7-75ae54dc4a47                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | cirros3.5                                            |
| owner            | 6840d3aa8b814d9caa54432ce44471b6                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 13267968                                             |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2019-02-18T02:48:40Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+------------------+------------------------------------------------------+
 
 
 
 
------------------------------------------------------------------------------------------------
 
List the images:
 
[root@openstack1 ~]# openstack image list
+--------------------------------------+-----------+--------+
| ID                                   | Name      | Status |
+--------------------------------------+-----------+--------+
| 0b105463-a9a2-4f89-a4d7-75ae54dc4a47 | cirros3.5 | active |
+--------------------------------------+-----------+--------+
 
[root@openstack1 ~]# glance image-list        
+--------------------------------------+-----------+
| ID                                   | Name      |
+--------------------------------------+-----------+
| 0b105463-a9a2-4f89-a4d7-75ae54dc4a47 | cirros3.5 |
+--------------------------------------+-----------+
 
Where the image is stored:
[root@openstack1 ~]# find / -name "0b105463-a9a2-4f89-a4d7-75ae54dc4a47"
/var/lib/glance/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47
[root@openstack1 ~]# ls -lh /var/lib/glance/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47
-rw-r-----. 1 glance glance 13M Feb 18 10:48 /var/lib/glance/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47
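The checksum reported by openstack image create above is the MD5 of the stored file, so the two can be cross-checked:

md5sum /var/lib/glance/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47
# expected: f8ab98ff5e73ebab884d80c9dc9c7290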
 
------------------------------------------------------------------------------------------------
 
Chapter 5: The OpenStack Compute Service, Nova
 
The Nova controller node (components an OpenStack VM requires: keystone, glance, nova, neutron)
 
API: receives and responds to external requests; supports the OpenStack API and the EC2 API
Cert: handles certificate management for authentication
Scheduler: schedules instances onto hosts
Conductor: middleware through which compute nodes access the database
Consoleauth: authorizes console sessions
Novncproxy: the VNC proxy
The Nova API component implements the RESTful API and is the only way to access Nova from outside.

It receives external requests and passes them to the other service components through the message queue; since it is also EC2 API compatible, Nova can also be managed day to day with EC2 tools.
 
The Nova Scheduler decides which host (compute node) a virtual machine is created on.
Scheduling an instance onto a physical node takes two steps:

         filtering (Filter)             weighting (Weight)

The FilterScheduler first obtains the unfiltered host list, then selects the compute hosts that satisfy the filter properties.
After filtering, weights are computed for the remaining hosts and, for each instance to be created, one host is chosen according to policy.
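Both steps are driven by nova.conf; the [filter_scheduler] section used later in this chapter enables a concrete filter chain, for example:

[filter_scheduler]
enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter

Only hosts that pass every filter in the list proceed to the weighting phase.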
 
1. Prerequisites
[root@openstack1 ~]# source admin-openstack.sh
 
 
Create the nova and placement services and their endpoints:
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://10.88.66.15:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://10.88.66.15:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://10.88.66.15:8774/v2.1
 
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://10.88.66.15:8778
openstack endpoint create --region RegionOne placement internal http://10.88.66.15:8778
openstack endpoint create --region RegionOne placement admin http://10.88.66.15:8778
 
2. Deploying the Nova controller node (openstack1)
First, deploy on the controller node all the required services except nova-compute.
Install the nova controller packages:
yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api
Edit the ``/etc/nova/nova.conf`` file and complete the following actions:
In the ``[DEFAULT]`` section, enable only the compute and metadata APIs:
[DEFAULT]
...
enabled_apis = osapi_compute,metadata
In the ``[api_database]`` and ``[database]`` sections, configure database access:
[api_database]
...
connection = mysql+pymysql://nova:nova@10.88.66.15/nova_api
[database]
...
connection = mysql+pymysql://nova:nova@10.88.66.15/nova
 
In the [DEFAULT] section, configure RabbitMQ message queue access; since Pike a single transport_url replaces the old settings:
[DEFAULT]
...
transport_url = rabbit://openstack:openstack@10.88.66.15

# Pre-Pike style ([DEFAULT] rpc_backend plus an [oslo_messaging_rabbit] section), kept for reference:
# rpc_backend = rabbit
# [oslo_messaging_rabbit]
# ...
# rabbit_host = 10.88.66.15
# rabbit_userid = openstack
# rabbit_password = openstack

 
In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:
[DEFAULT]
...
auth_strategy = keystone
[keystone_authtoken]
...
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
 
 
Note
Comment out or remove any other options in [keystone_authtoken].
 
In the [DEFAULT] section, configure the my_ip option with the controller node's management interface IP address:
[DEFAULT] 
...
my_ip  =  10.88.66.15
 
Note: if you do not set my_ip, replace $my_ip in the settings below with the controller node's management interface IP.
In the [DEFAULT] section, enable support for the Networking service:
[DEFAULT]
...
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
Note
By default, Compute uses an internal firewall service. Since the Networking service includes a firewall service, you must disable the Compute firewall with the ``nova.virt.firewall.NoopFirewallDriver`` driver.
In the ``[vnc]`` section, configure the VNC proxy to use the controller node's management interface IP address:
 
[vnc]
...
enabled  =  true
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
 
In the [glance] section, configure the location of the Image service API:
[glance]
...
api_servers = http://10.88.66.15:9292
In the [oslo_concurrency] section, configure the lock path:
[oslo_concurrency]
...
lock_path = /var/lib/nova/tmp
 
In the [placement] section, configure the Placement API (a commented-out fuller variant is shown first for reference):
 
[placement]
# auth_uri = http://10.88.66.15:5000/v3
# auth_url = http://10.88.66.15:35357/v3
# memcached_servers = 10.88.66.15:11211
# os_region_name = RegionOne
# project_domain_name = default
# project_name = service
# user_domain_name = default
# username = placement
# password = placement
# auth_type = password
 
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.88.66.15:35357/v3
username = placement
password = placement
 
The resulting nova.conf:
 
# grep -v "^#\|^$"  /etc/nova/nova.conf
[DEFAULT]
my_ip  =  10.88.66.15
transport_url = rabbit://openstack:openstack@10.88.66.15
auth_strategy = keystone
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
enabled_apis=osapi_compute,metadata
[api]
[api_database]
connection = mysql+pymysql://nova:nova@10.88.66.15/nova_api
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
connection = mysql+pymysql://nova:nova@10.88.66.15/nova
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.88.66.15:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.88.66.15:35357/v3
username = placement
password = placement
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[xvp]
A fuller, tuned variant of the same nova.conf follows (note the additional scheduler, filter, [libvirt], and [neutron] settings):
[DEFAULT]
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
compute_driver = libvirt.LibvirtDriver
osapi_compute_workers = 2
multi_host = True
enabled_apis = osapi_compute,metadata
osapi_compute_listen = 0.0.0.0
instance_name_template = instance-%08x
my_ip = 10.88.66.15
debug = True
instances_path = /var/lib/nova/instances
base_dir_name = _base
[api]
auth_strategy = keystone
[wsgi]
[scheduler]
workers = 2
driver = filter_scheduler
discover_hosts_in_cells_interval = 300
[filter_scheduler]
track_instance_changes = False
enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter
[key_manager]
[database]
[api_database]
[glance]
api_servers = http://10.88.66.15:9292
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
project_domain_name = default
project_name = service
user_domain_name = default
password = nova
username = nova
auth_type = password
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[upgrade_levels]
compute = auto
[oslo_messaging_notifications]
[conductor]
workers = 2
[cinder]
os_region_name = RegionOne
[libvirt]
live_migration_bandwidth = 0
live_migration_uri = qemu+ssh://stack@%s/system
virt_type = kvm
[placement]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
os_region_name = RegionOne
project_domain_name = default
project_name = service
user_domain_name = default
password = placement
username = placement
auth_type = password
[oslo_messaging_rabbit]
rabbit_host = 10.88.66.15
rabbit_userid = openstack
rabbit_password = openstack
[neutron]
url = http://10.88.66.15:9696
auth_url = http://10.88.66.15:35357/v3
service_metadata_proxy = True
metadata_proxy_shared_secret = syscloud.cn
region_name = RegionOne
auth_strategy = keystone
project_domain_name = default
project_name = service
user_domain_name = default
password = neutron
username = neutron
auth_type = password
[vnc]
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.15
novncproxy_base_url = http://43.239.121.156:6080/vnc_auto.html
 
 
Due to a packaging bug, you must enable access to the Placement API by adding the following configuration to /etc/httpd/conf.d/00-nova-placement-api.conf:
 
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
 
A reference 00-nova-placement-api.conf:
Listen 8778
 
<VirtualHost *:8778>
  WSGIProcessGroup nova-placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
  WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
  WSGIScriptAlias / /usr/bin/nova-placement-api
  <IfVersion >= 2.4>
    ErrorLogFormat "%M"
  </IfVersion>
  ErrorLog /var/log/nova/nova-placement-api.log
  #SSLEngine On
  #SSLCertificateFile ...
  #SSLCertificateKeyFile ...
</VirtualHost>
 
Alias /nova-placement-api /usr/bin/nova-placement-api
<Location /nova-placement-api>
  SetHandler wsgi-script
  Options +ExecCGI
  WSGIProcessGroup nova-placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
</Location>
 
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
 
The file as actually deployed on this controller:
# cat /etc/httpd/conf.d/00-nova-placement-api.conf
Listen 0.0.0.0:8778
 
<VirtualHost *:8778>
  WSGIProcessGroup nova-placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
  WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
  WSGIScriptAlias / /usr/bin/nova-placement-api
  <IfVersion >= 2.4>
    ErrorLogFormat "%M"
  </IfVersion>
  ErrorLog /var/log/nova/nova-placement-api.log
  #SSLEngine On
  #SSLCertificateFile ...
  #SSLCertificateKeyFile ...
  <Directory />
    Options All
    AllowOverride All
    Require all granted
  </Directory>
 
 
  <Directory /usr/bin/nova-placement-api>
    Options All
    AllowOverride All
    Require all granted
  </Directory>
</VirtualHost>
 
Alias /nova-placement-api /usr/bin/nova-placement-api
<Location /nova-placement-api>
  SetHandler wsgi-script
  Options +ExecCGI
  WSGIProcessGroup nova-placement-api
  WSGIApplicationGroup %{GLOBAL}
  WSGIPassAuthorization On
</Location>
 
 
Restart the httpd service:
systemctl restart httpd
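With httpd back up, a quick sanity check (a sketch; the exact JSON differs by release) is that the Placement endpoint answers on port 8778 with an unauthenticated version document:
 
curl http://10.88.66.15:8778
# expect something like: {"versions": [{"id": "v1.0", ...}]}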
Populate the nova-api database:
su -s /bin/sh -c "nova-manage api_db sync" nova
 
Note
Ignore any deprecation messages in this output.
Register the cell0 database:
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
 
Create the cell1 cell:
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
 
Populate the nova database:
su -s /bin/sh -c "nova-manage db sync" nova
 
Verify that nova cell0 and cell1 are registered correctly:
# nova-manage cell_v2 list_cells
+-------+--------------------------------------+-------------------------------------+--------------------------------------------------+
|  Name |                 UUID                 |            Transport URL            |               Database Connection                |
+-------+--------------------------------------+-------------------------------------+--------------------------------------------------+
| cell0 | 00000000-0000-0000-0000-000000000000 |                none:/               | mysql+pymysql://nova:****@10.88.66.15/nova_cell0 |
| cell1 | 6e2b9b4b-cbd4-4f21-8d9d-fe2df37fb3e3 | rabbit://openstack:****@10.88.66.15 |    mysql+pymysql://nova:****@10.88.66.15/nova    |
+-------+--------------------------------------+-------------------------------------+--------------------------------------------------+
 
Check whether the nova, nova_api, and nova_cell0 databases were populated successfully:
# mysql -unova -p'nova' -e "use nova_api;show tables;"
+------------------------------+
| Tables_in_nova_api           |
+------------------------------+
| aggregate_hosts              |
| aggregate_metadata           |
| aggregates                   |
| allocations                  |
.
.
.
| resource_provider_traits     |
| resource_providers           |
| traits                       |
| users                        |
+------------------------------+
# mysql -unova -p'nova' -e "use nova;show tables;"
+--------------------------------------------+
| Tables_in_nova                             |
+--------------------------------------------+
| agent_builds                               |
| aggregate_hosts                            |
| aggregate_metadata                         |
| aggregates                                 |
| allocations                                |
| block_device_mapping                       |
| bw_usage_cache                             |
| cells                                      |
| certificates                               |
| compute_nodes                              |
.
.
.
| shadow_volume_usage_cache                  |
| snapshot_id_mappings                       |
| snapshots                                  |
| tags                                       |
| task_log                                   |
| virtual_interfaces                         |
| volume_id_mappings                         |
| volume_usage_cache                         |
+--------------------------------------------+
# mysql -unova -p'nova' -e "use nova_cell0;show tables;"
+--------------------------------------------+
| Tables_in_nova_cell0                       |
+--------------------------------------------+
| agent_builds                               |
| aggregate_hosts                            |
| aggregate_metadata                         |
| aggregates                                 |
| allocations                                |
| block_device_mapping                       |
| bw_usage_cache                             |
| cells                                      |
.
.
.
| shadow_snapshots                           |
| shadow_task_log                            |
| shadow_virtual_interfaces                  |
| shadow_volume_id_mappings                  |
| shadow_volume_usage_cache                  |
| snapshot_id_mappings                       |
| snapshots                                  |
| tags                                       |
| task_log                                   |
| virtual_interfaces                         |
| volume_id_mappings                         |
| volume_usage_cache                         |
+--------------------------------------------+
 
 
Finalize the installation
Start the Compute services and configure them to start when the system boots:
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-compute.service
systemctl start  openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service openstack-nova-compute.service
 
If the controller node does not also run nova-compute, enable only the control-plane services:
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
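To confirm all of the control-plane services actually came up, a quick loop over their unit names (a sketch):
 
for s in api consoleauth scheduler conductor novncproxy; do
  # prints "active" for each healthy unit
  echo -n "openstack-nova-${s}: "; systemctl is-active openstack-nova-${s}.service
done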
 
 
Nova changed substantially from the Newton release onward; reference: https://docs.openstack.org/nova/pike/install/controller-install-rdo.html
------------------------------------------------------------------------------------------------
 
3. Deploying the Nova compute node (openstack2)
nova-compute normally runs on compute nodes; it receives requests over the message queue and manages the lifecycle of VMs.
nova-compute manages KVM through libvirt and Xen through the XenAPI.
 
The base packages for the compute node were already installed in the opening section, so that step is not repeated here.
 
[root@openstack2 ~]# yum install -y openstack-nova-compute 
 
Edit the /etc/nova/nova.conf file and complete the following steps:
 
In the [DEFAULT] section, enable only the compute and metadata APIs:
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata
 
In the [DEFAULT] section, configure the RabbitMQ message queue connection:
[DEFAULT]
...
transport_url = rabbit://openstack:openstack@10.88.66.15
 
Note: the rpc_backend option is no longer supported since the OpenStack Newton release.
In the [api] and [keystone_authtoken] sections, configure Identity service access:
[api]
...
auth_strategy = keystone
[keystone_authtoken]
...
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
 
Note
Comment out or remove any other options in the [keystone_authtoken] section.
In the [DEFAULT] section, enable support for the Networking service:
[DEFAULT]
...
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
Note
By default, Compute uses an internal firewall service. Because Networking includes a firewall service, you must disable the Compute firewall by using the nova.virt.firewall.NoopFirewallDriver driver.
 
In the [vnc] section, enable and configure remote console access:
 
[vnc]
...
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.16
novncproxy_base_url = http://10.88.66.15:6080/vnc_auto.html 
 
The server component listens on all IP addresses, while the proxy component listens only on the management interface IP address of the compute node. The base URL indicates the location where you can use a web browser to access remote consoles of instances on this compute node.
 
Note
If the host running the browser cannot resolve the controller hostname, replace controller with the management interface IP address of your controller node.
In the [glance] section, configure the location of the Image service API:
 
[glance]
...
api_servers = http://10.88.66.15:9292
In the [oslo_concurrency] section, configure the lock path:
 
[oslo_concurrency]
...
lock_path = /var/lib/nova/tmp
 
In the [placement] section, configure the Placement API:
 
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.88.66.15:35357/v3
username = placement
password = placement
 
 
 
 
The resulting nova.conf on the compute node:
[root@openstack2 ~]# grep -v '^#\|^$' /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack@10.88.66.15
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.88.66.15:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://10.88.66.15:35357/v3
username = placement
password = placement
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.16
novncproxy_base_url = http://10.88.66.15:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
The full working nova.conf on the compute node (including the [neutron] and [libvirt] settings used later):
[root@openstack2 ~]# cat /etc/nova/nova.conf
[DEFAULT]
auth_strategy = keystone
use_neutron = True
compute_driver = libvirt.LibvirtDriver
firewall_driver=nova.virt.firewall.NoopFirewallDriver
[api]
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.88.66.15:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
live_migration_bandwidth = 0
#live_migration_uri = qemu+tcp://%s/system
live_migration_uri = qemu+ssh://stack@%s/system
#cpu_mode = none
virt_type = kvm
[matchmaker_redis]
[metrics]
[mks]
 
[neutron]
url = http://10.88.66.15:9696
auth_url = http://10.88.66.15:35357/v3
service_metadata_proxy = True
metadata_proxy_shared_secret = syscloud.cn
region_name = RegionOne
auth_strategy = keystone
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
 
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
driver = messagingv2
[oslo_messaging_rabbit]
rabbit_host = 10.88.66.15
rabbit_userid = openstack
rabbit_password = openstack
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
 
 
[placement]
 
os_region_name = RegionOne
project_domain_name = default
project_name = service
user_domain_name = default
password = placement
username = placement
auth_url = http://10.88.66.15:35357/v3
auth_type = password
 
 
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.16
novncproxy_base_url = http://43.239.121.156:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
 
 
Finalize the installation
Determine whether your compute node supports hardware acceleration for virtual machines:
 
$ egrep -c '(vmx|svm)' /proc/cpuinfo
If this command returns a value of one or greater, your compute node supports hardware acceleration and typically requires no additional configuration.
 
If this command returns zero, your compute node does not support hardware acceleration and you must configure libvirt to use QEMU instead of KVM.
In the [libvirt] section of /etc/nova/nova.conf, make the following edit:
[libvirt]
...
virt_type = qemu
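Because this lab nests KVM inside a VM, it is also worth confirming that the kvm modules really did load in the guest before falling back to QEMU (a quick check):
 
lsmod | grep kvm      # expect kvm_intel (or kvm_amd) plus kvm
ls -l /dev/kvm        # this device node must exist for virt_type = kvm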
 
 
 
Start the Compute service, including its dependencies, and configure them to start automatically when the system boots:
 
[root@openstack2 ~]# systemctl enable libvirtd.service openstack-nova-compute.service
[root@openstack2 ~]# systemctl start libvirtd.service openstack-nova-compute.service
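Per the Pike install guide, a newly started compute node must be mapped into a cell before the scheduler can place instances on it. The controller's nova.conf above sets discover_hosts_in_cells_interval = 300, so discovery also happens automatically within five minutes; to map the new host immediately, run this on the controller:
 
[root@openstack1 ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova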
 
 
Verify that it worked (on the controller):
 
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| ID | Binary           | Host       | Zone     | Status  | State | Updated At                 |
+----+------------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-consoleauth | openstack1 | internal | enabled | up    | 2019-02-18T07:16:40.000000 |
|  2 | nova-scheduler   | openstack1 | internal | enabled | up    | 2019-02-18T07:16:39.000000 |
|  3 | nova-conductor   | openstack1 | internal | enabled | up    | 2019-02-18T07:16:39.000000 |
|  9 | nova-compute     | openstack2 | nova     | enabled | up    | 2019-02-18T07:16:34.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+
 
 
Or verify with the following commands:
[root@openstack1 ~]# openstack host list
+------------+-------------+----------+
| Host Name  | Service     | Zone     |
+------------+-------------+----------+
| openstack1 | consoleauth | internal |
| openstack1 | scheduler   | internal |
| openstack1 | conductor   | internal |
| openstack2 | compute     | nova     |
+------------+-------------+----------+
 
[root@openstack1 ~]# nova service-list
+--------------------------------------+------------------+------------+----------+---------+-------+----------------------------+-----------------+-------------+
| Id                                   | Binary           | Host       | Zone     | Status  | State | Updated_at                 | Disabled Reason | Forced down |
+--------------------------------------+------------------+------------+----------+---------+-------+----------------------------+-----------------+-------------+
| 4790ca20-37c3-4fbf-92d1-72a7b584f6f6 | nova-consoleauth | openstack1 | internal | enabled | up    | 2019-02-18T07:19:10.000000 | -               | False       |
| 69a69d43-98c3-436e-866b-03d7944d4186 | nova-scheduler   | openstack1 | internal | enabled | up    | 2019-02-18T07:19:10.000000 | -               | False       |
| 14bb7cc2-0e80-4ef5-9f28-0775a69d7943 | nova-conductor   | openstack1 | internal | enabled | up    | 2019-02-18T07:19:09.000000 | -               | False       |
| b20775d6-213e-403d-bfc5-2a3c3f6438e1 | nova-compute     | openstack2 | nova     | enabled | up    | 2019-02-18T07:19:14.000000 | -               | False       |
+--------------------------------------+------------------+------------+----------+---------+-------+----------------------------+-----------------+-------------+
 
If these four services are listed, nova was deployed successfully.
 
Verify the connection between nova and glance; output like the following indicates success:
[root@openstack1 ~]# openstack image list
+--------------------------------------+-----------+--------+
| ID                                   | Name      | Status |
+--------------------------------------+-----------+--------+
| 0b105463-a9a2-4f89-a4d7-75ae54dc4a47 | cirros3.5 | active |
+--------------------------------------+-----------+--------+
 
[root@openstack1 ~]# openstack image show 0b105463-a9a2-4f89-a4d7-75ae54dc4a47
+------------------+------------------------------------------------------+
| Field            | Value                                                |
+------------------+------------------------------------------------------+
| checksum         | f8ab98ff5e73ebab884d80c9dc9c7290                     |
| container_format | bare                                                 |
| created_at       | 2019-02-18T02:48:39Z                                 |
| disk_format      | qcow2                                                |
| file             | /v2/images/0b105463-a9a2-4f89-a4d7-75ae54dc4a47/file |
| id               | 0b105463-a9a2-4f89-a4d7-75ae54dc4a47                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | cirros3.5                                            |
| owner            | 6840d3aa8b814d9caa54432ce44471b6                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 13267968                                             |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2019-02-18T02:48:40Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+------------------+------------------------------------------------------+
 
 
Note: as of the Newton release, the nova image-list command is no longer supported (it became glance image-list or openstack image list), so only the commands above apply.
 
 
 
The verification method recommended by the official documentation since the Newton release:
# openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| ID | Binary           | Host       | Zone     | Status  | State | Updated At                 |
+----+------------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-consoleauth | openstack1 | internal | enabled | up    | 2019-02-18T07:21:30.000000 |
|  2 | nova-scheduler   | openstack1 | internal | enabled | up    | 2019-02-18T07:21:40.000000 |
|  3 | nova-conductor   | openstack1 | internal | enabled | up    | 2019-02-18T07:21:40.000000 |
|  9 | nova-compute     | openstack2 | nova     | enabled | up    | 2019-02-18T07:21:34.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+
 
Verify the connection between nova and keystone; output like the following indicates success:
# openstack catalog list
+-----------+-----------+------------------------------------------+
| Name      | Type      | Endpoints                                |
+-----------+-----------+------------------------------------------+
| nova      | compute   | RegionOne                                |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                          |
| keystone  | identity  | RegionOne                                |
|           |           |   internal: http://10.88.66.15:5000/v3/  |
|           |           | RegionOne                                |
|           |           |   admin: http://10.88.66.15:35357/v3/    |
|           |           | RegionOne                                |
|           |           |   public: http://10.88.66.15:5000/v3/    |
|           |           |                                          |
| placement | placement | RegionOne                                |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           | RegionOne                                |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                |
|           |           |   public: http://10.88.66.15:8778        |
|           |           |                                          |
| glance    | image     | RegionOne                                |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           | RegionOne                                |
|           |           |   public: http://10.88.66.15:9292        |
|           |           |                                          |
+-----------+-----------+------------------------------------------+
 
# nova-status upgrade check
+---------------------------------------------------------------------+
| Upgrade Check Results                                               |
+---------------------------------------------------------------------+
| Check: Cells v2                                                     |
| Result: Success                                                     |
| Details: None                                                       |
+---------------------------------------------------------------------+
| Check: Placement API                                                |
| Result: Success                                                     |
| Details: None                                                       |
+---------------------------------------------------------------------+
| Check: Resource Providers                                           |
| Result: Success                                                     |
| Details: None                                                       |
+---------------------------------------------------------------------+
 
 
 
Troubleshooting: openstack catalog list shows two keystone entries…
 
[root@openstack1 ~]# openstack catalog list
+-----------+-----------+---------------------------------------------+
| Name      | Type      | Endpoints                                   |
+-----------+-----------+---------------------------------------------+
| nova      | compute   | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                             |
| placement | placement | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8778        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           |                                             |
| glance    | image     | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:9292        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           |                                             |
| keystone  | identity  |                                             |
| keystone  | identity  | RegionOne                                   |
|           |           |   public: http://10.88.66.15:5000/v3/    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:5000/v3/  |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:35357/v3/    |
|           |           |                                             |
+-----------+-----------+---------------------------------------------+
 
 
 
Fix: delete the redundant keystone service. Check openstack catalog list first; the redundant entry is the one with no endpoints attached to it.
[root@openstack1 ~]# openstack service list
+----------------------------------+-----------+-----------+
| ID                               | Name      | Type      |
+----------------------------------+-----------+-----------+
| 41455c0be35b4eea8fb7caeecbc2f23f | nova      | compute   |
| 9d948d63775a48a7a34ce104852f079f | placement | placement |
| b9065427be214b5bb5a80f62e4f03e6c | glance    | image     |
| bf5c6f371d3541a083a8bc7a4f4a91a5 | keystone  | identity  |
| d6101418a4b6409db2b4865fd5ae5c9c | keystone  | identity  |
+----------------------------------+-----------+-----------+
[root@openstack1 ~]# openstack service show  bf5c6f371d3541a083a8bc7a4f4a91a5
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Identity               |
| enabled     | True                             |
| id          | bf5c6f371d3541a083a8bc7a4f4a91a5 |
| name        | keystone                         |
| type        | identity                         |
+-------------+----------------------------------+
[root@openstack1 ~]# openstack service show  d6101418a4b6409db2b4865fd5ae5c9c
+---------+----------------------------------+
| Field   | Value                            |
+---------+----------------------------------+
| enabled | True                             |
| id      | d6101418a4b6409db2b4865fd5ae5c9c |
| name    | keystone                         |
| type    | identity                         |
+---------+----------------------------------+
 
The correct operation:
[root@openstack1 ~]# openstack service delete bf5c6f371d3541a083a8bc7a4f4a91a5
 
A demonstration of the wrong operation:
[root@openstack1 ~]# openstack service delete d6101418a4b6409db2b4865fd5ae5c9c
[root@openstack1 ~]# nova-status upgrade check
+-------------------------------------------------------------------+
| Upgrade Check Results                                             |
+-------------------------------------------------------------------+
| Check: Cells v2                                                   |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
| Check: Placement API                                              |
| Result: Failure                                                   |
| Details: Placement service credentials do not work.               |
+-------------------------------------------------------------------+
| Check: Resource Providers                                         |
| Result: Warning                                                   |
| Details: There are no compute resource providers in the Placement |
|   service but there are 2 compute nodes in the deployment.        |
|   This means no compute nodes are reporting into the              |
|   Placement service and need to be upgraded and/or fixed.         |
|   See                                                             |
|   http://docs.openstack.org/developer/nova/placement.html         |
|   for more details.                                               |
+-------------------------------------------------------------------+
[root@openstack1 ~]# openstack catalog list
+-----------+-----------+---------------------------------------------+
| Name      | Type      | Endpoints                                   |
+-----------+-----------+---------------------------------------------+
| nova      | compute   | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                             |
| placement | placement | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8778        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           |                                             |
| glance    | image     | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:9292        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           |                                             |
| keystone  | identity  |                                             |
+-----------+-----------+---------------------------------------------+
[root@openstack1 ~]# openstack service delete bf5c6f371d3541a083a8bc7a4f4a91a5
Failed to delete consumer with type, name or ID 'bf5c6f371d3541a083a8bc7a4f4a91a5': admin endpoint for identity service not found
1 of 1 services failed to delete.
[root@openstack1 ~]# openstack catalog list
+-----------+-----------+---------------------------------------------+
| Name      | Type      | Endpoints                                   |
+-----------+-----------+---------------------------------------------+
| nova      | compute   | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                             |
| placement | placement | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8778        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           |                                             |
| glance    | image     | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:9292        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           |                                             |
| keystone  | identity  |                                             |
+-----------+-----------+---------------------------------------------+
 
 
-----------------------------------------------------------------------------------------------------------------------------------------------
After the wrong delete, things look like this. Don't panic: a little analysis leads to a fix.
[root@openstack1 ~]# openstack service list
admin endpoint for identity service not found
[root@openstack1 ~]# openstack endpoint list
admin endpoint for identity service not found
Re-bootstrap the Identity service:
keystone-manage bootstrap --bootstrap-password admin --bootstrap-admin-url http://10.88.66.15:35357/v3/ --bootstrap-internal-url http://10.88.66.15:5000/v3/ --bootstrap-public-url http://10.88.66.15:5000/v3/ --bootstrap-region-id RegionOne
 
 
Check whether everything is back to normal:
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack service list
+----------------------------------+-----------+-----------+
| ID                               | Name      | Type      |
+----------------------------------+-----------+-----------+
| 41455c0be35b4eea8fb7caeecbc2f23f | nova      | compute   |
| 9d948d63775a48a7a34ce104852f079f | placement | placement |
| b9065427be214b5bb5a80f62e4f03e6c | glance    | image     |
| bf5c6f371d3541a083a8bc7a4f4a91a5 | keystone  | identity  |
+----------------------------------+-----------+-----------+
[root@openstack1 ~]# openstack catalog list
+-----------+-----------+---------------------------------------------+
| Name      | Type      | Endpoints                                   |
+-----------+-----------+---------------------------------------------+
| nova      | compute   | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8774/v2.1    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8774/v2.1 |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8774/v2.1   |
|           |           |                                             |
| placement | placement | RegionOne                                   |
|           |           |   public: http://10.88.66.15:8778        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:8778         |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:8778      |
|           |           |                                             |
| glance    | image     | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:9292      |
|           |           | RegionOne                                   |
|           |           |   public: http://10.88.66.15:9292        |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:9292         |
|           |           |                                             |
| keystone  | identity  | RegionOne                                   |
|           |           |   public: http://10.88.66.15:5000/v3/    |
|           |           | RegionOne                                   |
|           |           |   internal: http://10.88.66.15:5000/v3/  |
|           |           | RegionOne                                   |
|           |           |   admin: http://10.88.66.15:35357/v3/    |
|           |           |                                             |
+-----------+-----------+---------------------------------------------+
[root@openstack1 ~]# openstack user list
+----------------------------------+-----------+
| ID                               | Name      |
+----------------------------------+-----------+
| 02c2f92b32634152953918e7ca13d14d | nova      |
| 1a8166683c8d40aeba637c45a048df61 | glance    |
| 3a48f785d42949e4ab29787ba529df68 | admin     |
| 6693774dd0a545d09905461ea7ad3c84 | demo      |
| 8ff779da2c264adc92fc1b66a5c327a6 | placement |
| 9176221496ed45e6b34cbc6110f13e2b | neutron   |
+----------------------------------+-----------+
[root@openstack1 ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                          |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| 1333885c9bad499b94143429e3c38929 | RegionOne | glance       | image        | True    | internal  | http://10.88.66.15:9292      |
| 153b5834edf1475a9f99ced71bd92a0f | RegionOne | glance       | image        | True    | public    | http://10.88.66.15:9292      |
| 1b9f96da57fb403a863f9dd5aad39421 | RegionOne | placement    | placement    | True    | public    | http://10.88.66.15:8778      |
| 1ddbe1ec55a040d9a8561e3e5fe715fc | RegionOne | keystone     | identity     | True    | public    | http://10.88.66.15:5000/v3/  |
| 304c7f89985343b79bc3cadb94e87c64 | RegionOne | glance       | image        | True    | admin     | http://10.88.66.15:9292      |
| 5fcd1f1eb77c4d5c99101a20fe6326c0 | RegionOne | nova         | compute      | True    | admin     | http://10.88.66.15:8774/v2.1 |
| 68a4d1c7495f497fa40332145934316e | RegionOne | nova         | compute      | True    | internal  | http://10.88.66.15:8774/v2.1 |
| 833c55c98183435e82f57e759b177d76 | RegionOne | keystone     | identity     | True    | internal  | http://10.88.66.15:5000/v3/  |
| b0c08596c6744f0487f2b86b45360313 | RegionOne | keystone     | identity     | True    | admin     | http://10.88.66.15:35357/v3/ |
| ce8a5d83f2434b5fae7707dd593a7b58 | RegionOne | nova         | compute      | True    | public    | http://10.88.66.15:8774/v2.1 |
| d8b4085f5c3d4b31b1f9905bd8484edc | RegionOne | placement    | placement    | True    | admin     | http://10.88.66.15:8778      |
| df402d8d32d64ea18082dcbc8704a91d | RegionOne | placement    | placement    | True    | internal  | http://10.88.66.15:8778      |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
[root@openstack1 ~]#
 
 
 
Some similar errors, for reference:
2018-07-19 12:45:03,998 - util.py[WARNING]: 'http://169.254.169.254/2009-04-04/meta-data/instance-id' failed [114/120s]: url error [[Errno 111] Connection refused]
2018-07-19 12:45:11,005 - DataSourceEc2.py[CRITICAL]: giving up on md after 121 seconds
 
Cause analysis:
The nova-api service on the compute node was not running. Track down the error through the logs:
root@node18:~# tail /var/log/nova/nova-api.log
2018-07-19 20:21:29 CRITICAL nova [-] No module named keystone.middleware.auth_token
2018-07-19 20:27:10 CRITICAL nova [-] No module named keystone.middleware.auth_token
Solution:
The compute node cannot find keystone; installing the keystone client packages fixes it:
yum install -y python-keystone python-keystoneclient
 
 
Error: Placement service credentials do not work.
 
[root@openstack2 ~]# cat /var/log/nova/nova-compute.log
2019-02-28 17:27:00.024 30253 WARNING nova.scheduler.client.report [req-e963d119-3730-4651-a057-cd7961cf43eb - - - - -] Placement service credentials do not work. Placement is optional in Newton, but required in Ocata. Please enable the placement service before upgrading.: Unauthorized: The request you have made requires authentication. (HTTP 401) (Request-ID: req-9b309a61-59a9-4188-8197-7fbb15d1f72e)
2019-02-28 17:27:00.871 30253 ERROR nova.compute.manager [req-e963d119-3730-4651-a057-cd7961cf43eb - - - - -] Error updating resources for node openstack2.: ResourceProviderCreationFailed: Failed to create resource provider openstack2
 
# nova-status upgrade check
+-------------------------------------------------------------------+
| Upgrade Check Results                                             |
+-------------------------------------------------------------------+
| Check: Cells v2                                                   |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
| Check: Placement API                                              |
| Result: Failure                                                   |
| Details: Placement service credentials do not work.               |
+-------------------------------------------------------------------+
| Check: Resource Providers                                         |
| Result: Warning                                                   |
| Details: There are no compute resource providers in the Placement |
|   service but there are 2 compute nodes in the deployment.        |
|   This means no compute nodes are reporting into the              |
|   Placement service and need to be upgraded and/or fixed.         |
|   See                                                             |
|   http://docs.openstack.org/developer/nova/placement.html         |
|   for more details.                                               |
+-------------------------------------------------------------------+
 
Fix: see https://ask.openstack.org/en/question/105800/ocatathe-placement-api-endpoint-not-found-placement-is-optional-in-newton-but-required-in-ocata-please-enable-the-placement-service-before-upgrading/
 
Error: Cells v2 failure
[root@node2 ~]# nova-status upgrade check
Option "os_region_name" from group "placement" is deprecated. Use option "region-name" from group "placement".
+-------------------------------------------------------------------+
| Upgrade Check Results                                             |
+-------------------------------------------------------------------+
| Check: Cells v2                                                   |
| Result: Failure                                                   |
| Details: No host mappings found but there are compute nodes. Run  |
|   command 'nova-manage cell_v2 simple_cell_setup' and then        |
|   retry.                                                          |
+-------------------------------------------------------------------+
| Check: Placement API                                              |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
| Check: Resource Providers                                         |
| Result: Warning                                                   |
| Details: There are no compute resource providers in the Placement |
|   service but there are 2 compute nodes in the deployment.        |
|   This means no compute nodes are reporting into the              |
|   Placement service and need to be upgraded and/or fixed.         |
|   See                                                             |
|   https://docs.openstack.org/nova/latest/user/placement.html      |
|   for more details.                                               |
+-------------------------------------------------------------------+
| Check: Ironic Flavor Migration                                    |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
| Check: API Service Version                                        |
| Result: Success                                                   |
| Details: None                                                     |
+-------------------------------------------------------------------+
 
Fix: run nova-manage cell_v2 simple_cell_setup and check again.
 
--------------------------------------------------------------------------------------------------------------------------------------------
 
 
Chapter 6: The OpenStack Networking Service (Neutron)
 
1. Prerequisites
Register the neutron networking service and its endpoints:
 
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack service create --name neutron --description "OpenStack Networking" network
[root@openstack1 ~]# openstack endpoint create --region RegionOne network public http://10.88.66.15:9696
[root@openstack1 ~]# openstack endpoint create --region RegionOne network internal http://10.88.66.15:9696
[root@openstack1 ~]# openstack endpoint create --region RegionOne network admin http://10.88.66.15:9696
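To confirm the three network endpoints registered cleanly, filter the endpoint list by service (a quick check):
 
[root@openstack1 ~]# openstack endpoint list --service network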
 
2. Configure networking options
 
Deploying Neutron on the controller node (openstack1):
[root@openstack1 ~]# yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
 
Deploying Neutron on the compute node (openstack2):
[root@openstack2 ~]# yum install -y openstack-neutron-linuxbridge ebtables ipset
 
 
Because Neutron configuration differs by network model, the example below uses the most common setup: a flat network with the Linuxbridge driver.
 
Neutron controller node configuration (openstack1)
Edit the /etc/neutron/neutron.conf file and complete the following steps:
 
In the [database] section, configure database access:
 
[database]
...
connection = mysql+pymysql://neutron:neutron@10.88.66.15/neutron
 
In the [DEFAULT] section, enable the ML2 core plug-in and the L3 router service plug-in, and disable additional plug-ins:
[DEFAULT]
...
core_plugin = ml2
service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
 
 
In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:
 
[DEFAULT]
...
auth_strategy = keystone
[keystone_authtoken]
...
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
 
In the [DEFAULT] section, configure the RabbitMQ message queue connection:
 
[DEFAULT]
...
transport_url = rabbit://openstack:openstack@10.88.66.15
 
 
 
Note
 
Comment out or remove any other options in the [keystone_authtoken] section.
In the [DEFAULT] and [nova] sections, configure Networking to notify Compute of network topology changes:
[DEFAULT]
...
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[nova]
...
auth_url = http://10.88.66.15:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
 
In the [oslo_concurrency] section, configure the lock path:
[oslo_concurrency]
...
lock_path = /var/lib/neutron/tmp
 
 
# grep -v "^#\|^$" /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
auth_strategy = keystone
transport_url = rabbit://openstack:openstack@10.88.66.15
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[agent]
[cors]
[database]
connection = mysql+pymysql://neutron:neutron@10.88.66.15/neutron
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
[matchmaker_redis]
[nova]
auth_url = http://10.88.66.15:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
A fuller, tuned variant of the same neutron.conf follows:
[DEFAULT]
service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
rpc_state_report_workers = 0
api_workers = 2
notify_nova_on_port_data_changes = True
notify_nova_on_port_status_changes = True
auth_strategy = keystone
allow_overlapping_ips = True
debug = True
core_plugin = ml2
bind_host = 0.0.0.0
use_syslog = False
[agent]
[cors]
[database]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
[matchmaker_redis]
[nova]
memcached_servers = 10.88.66.15:11211
signing_dir = /var/cache/neutron
project_domain_name = Default
project_name = service
user_domain_name = Default
password = nova
username = nova
auth_url = http://10.88.66.15:35357/v3
auth_type = password
region_name = RegionOne
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = 10.88.66.15
rabbit_userid = openstack
rabbit_password = openstack
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
 
Configure the Modular Layer 2 (ML2) plug-in
The ML2 plug-in uses the Linuxbridge mechanism to build the layer-2 virtual networking infrastructure for instances.
 
Edit the /etc/neutron/plugins/ml2/ml2_conf.ini file and complete the following steps:
 
In the [ml2] section, enable the flat and VLAN network types (the overlay type drivers are loaded as well):
 
[ml2]
...
type_drivers = flat,vlan,gre,vxlan,geneve
In the [ml2] section, configure VLAN as the tenant (self-service) network type:
 
[ml2]
...
tenant_network_types = vlan
In the [ml2] section, enable the Linuxbridge and Open vSwitch mechanism drivers:
 
[ml2]
...
mechanism_drivers = linuxbridge,openvswitch
Warning
 
After you configure the ML2 plug-in, removing values from the type_drivers option can lead to database inconsistency.
In the [ml2] section, enable the port security extension driver:
[ml2]
...
extension_drivers = port_security
In the [ml2_type_flat] section, configure the provider virtual network as a flat network:
[ml2_type_flat]
...
flat_networks = external
In the [securitygroup] section, enable ipset to make security group rules more efficient:
[securitygroup]
...
enable_ipset = true
 
 
[root@openstack1 ~]# grep -v "^#\|^$" /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[l2pop]
[ml2]
type_drivers = flat,vlan,gre,vxlan,geneve
tenant_network_types = vlan
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = external
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
network_vlan_ranges = default:1:4000,external:1:4000
[ml2_type_vxlan]
[securitygroup]
enable_ipset = true
 
A second capture of the same file (the local type driver added; otherwise equivalent):
[root@openstack1 ~]# grep -v "^#\|^$" /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[l2pop]
[ml2]
tenant_network_types = vlan
extension_drivers = port_security
mechanism_drivers = linuxbridge
type_drivers = local,flat,vlan,gre,vxlan,geneve
[ml2_type_flat]
flat_networks = external
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
network_vlan_ranges = default:1:4000,external:1:4000
[ml2_type_vxlan]
[securitygroup]
enable_ipset = True
 
Configure the Linuxbridge agent
The Linuxbridge agent builds the layer-2 virtual networking infrastructure for instances and handles security group rules.
 
Edit the /etc/neutron/plugins/ml2/linuxbridge_agent.ini file and complete the following steps:
 
In the [linux_bridge] section, map provider virtual networks to physical network interfaces:
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
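The mapping format is provider-network-name:interface-name, so here the default network rides on eth1 and the external network on eth2. Before starting the agent, confirm both interfaces exist and are up (a quick check):
 
ip link show eth1
ip link show eth2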
 
 
In the [vxlan] section, disable VXLAN overlay networking:
 
[vxlan]
enable_vxlan = false
In the [securitygroup] section, enable security groups and configure the Linuxbridge iptables firewall driver:
[securitygroup]
...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
 
[root@openstack1 ~]# grep -v "^#\|^$" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = true
[vxlan]
enable_vxlan = false
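The install guide also expects the kernel bridge netfilter hooks to be active on nodes running the Linuxbridge agent; a quick check (load br_netfilter first if the sysctls are missing):
 
modprobe br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables
# both values should be 1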
 
 
Configure the DHCP agent
The DHCP agent provides DHCP services for virtual networks.
 
Edit the /etc/neutron/dhcp_agent.ini file and complete the following steps:
 
In the [DEFAULT] section, configure the Linuxbridge interface driver and the Dnsmasq DHCP driver, and enable isolated metadata so that instances on provider networks can reach metadata over the network:
 
[DEFAULT]
...
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
 
[root@openstack1 ~]# grep -v "^#\|^$" /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
[agent]
[ovs]
 
 
 
 
Configure the metadata agent
The metadata agent provides configuration information, such as credentials, to instances.
Edit the /etc/neutron/metadata_agent.ini file and complete the following steps:
In the [DEFAULT] section, configure the metadata host and the shared secret:
[DEFAULT]
...
nova_metadata_ip = 10.88.66.15
metadata_proxy_shared_secret = syscloud.cn
 
# grep -v '^#\|^$' /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_ip = 10.88.66.15
metadata_proxy_shared_secret = syscloud.cn
[agent]
[cache]
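This shared secret must match metadata_proxy_shared_secret in the [neutron] section of nova.conf; a one-liner to compare the two (a quick check):
 
grep metadata_proxy_shared_secret /etc/neutron/metadata_agent.ini /etc/nova/nova.conf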
 
 
 
Configure the L3 agent
 
# grep -v '^#\|^$' /etc/neutron/l3_agent.ini
[DEFAULT]
ovs_use_veth = False
interface_driver = linuxbridge
debug = True
[agent]
[ovs]
 
 
 
 
Configure the Compute service to use the Networking service
Edit the /etc/nova/nova.conf file and complete the following steps:
 
In the [neutron] section, configure access parameters, enable the metadata proxy, and set the shared secret:
 
 
[neutron]
...
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = syscloud.cn
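After editing nova.conf on the compute node, restart the Compute service so it picks up the new [neutron] settings (on the controller, nova-api is restarted below):
 
[root@openstack2 ~]# systemctl restart openstack-nova-compute.service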
 
 
Finalize the installation
The Networking service initialization scripts expect a symbolic link /etc/neutron/plugin.ini pointing to the ML2 plug-in configuration file /etc/neutron/plugins/ml2/ml2_conf.ini. If the link does not exist, create it with the following command:
 
[root@openstack1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
 
Populate the database:
 
[root@openstack1 ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
Note
 
Database population occurs late for Networking because the script requires complete server and plug-in configuration files.
 
Restart the Compute API service:
 
[root@openstack1 ~]# systemctl restart openstack-nova-api.service
Start the Networking services and configure them to start when the system boots.
 
For both networking options:
 
[root@openstack1 ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@openstack1 ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
For networking option 2, also enable and start the layer-3 service:
 
[root@openstack1 ~]# systemctl enable neutron-l3-agent.service
[root@openstack1 ~]# systemctl start neutron-l3-agent.service
 
 
Verify that neutron is healthy on the controller node:
[root@openstack1 ~]# openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 36134331-0c29-4eaa-b287-93e69836d419 | DHCP agent         | openstack1 | nova              | :-)   | UP    | neutron-dhcp-agent        |
| 67b10d2b-2438-40e1-8402-70219cd5100c | Metadata agent     | openstack1 | None              | :-)   | UP    | neutron-metadata-agent    |
| 6e40171c-6be3-49a7-93d0-ee54ce831025 | Linux bridge agent | openstack1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
| 7fbb4072-6358-4cf6-8b6e-9631bb0c9eac | L3 agent           | openstack1 | nova              | :-)   | UP    | neutron-l3-agent          |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
 
 
 
Final verification example:
[root@openstack1 ~]# openstack extension list --network
+----------------------------------------------------------------------------------------------+---------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
| Name                                                                                         | Alias                     | Description                                                                                                                                              |
+----------------------------------------------------------------------------------------------+---------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
| Default Subnetpools                                                                          | default-subnetpools       | Provides ability to mark and use a subnetpool as the default                                                                                             |
| Network IP Availability                                                                      | network-ip-availability   | Provides IP availability data for each network and subnet.                                                                                               |
| Network Availability Zone                                                                    | network_availability_zone | Availability zone support for network.                                                                                                                   |
| Network MTU (writable)                                                                       | net-mtu-writable          | Provides a writable MTU attribute for a network resource.                                                                                                |
| Port Binding                                                                                 | binding                   | Expose port bindings of a virtual port to external application                                                                                           |
| agent                                                                                        | agent                     | The agent management extension.                                                                                                                          |
| Subnet Allocation                                                                            | subnet_allocation         | Enables allocation of subnets from a subnet pool                                                                                                         |
| DHCP Agent Scheduler                                                                         | dhcp_agent_scheduler      | Schedule networks among dhcp agents                                                                                                                      |
| Tag support                                                                                  | tag                       | Enables to set tag on resources.                                                                                                                         |
| Neutron external network                                                                     | external-net              | Adds external network attribute to network resource.                                                                                                     |
| Neutron Service Flavors                                                                      | flavors                   | Flavor specification for Neutron advanced services                                                                                                       |
| Network MTU                                                                                  | net-mtu                   | Provides MTU attribute for a network resource.                                                                                                           |
| Availability Zone                                                                            | availability_zone         | The availability zone extension.                                                                                                                         |
| Quota management support                                                                     | quotas                    | Expose functions for quotas management per tenant                                                                                                        |
| Tag support for resources with standard attribute: trunk, policy, security_group, floatingip | standard-attr-tag         | Enables to set tag on resources with standard attribute.                                                                                                 |
| If-Match constraints based on revision_number                                                | revision-if-match         | Extension indicating that If-Match based on revision_number is supported.                                                                                |
| Provider Network                                                                             | provider                  | Expose mapping of virtual networks to physical networks                                                                                                  |
| Multi Provider Network                                                                       | multi-provider            | Expose mapping of virtual networks to multiple physical networks                                                                                         |
| Quota details management support                                                             | quota_details             | Expose functions for quotas usage statistics per project                                                                                                 |
| Address scope                                                                                | address-scope             | Address scopes extension.                                                                                                                                |
| Subnet service types                                                                         | subnet-service-types      | Provides ability to set the subnet service_types field                                                                                                   |
| Resource timestamps                                                                          | standard-attr-timestamp   | Adds created_at and updated_at fields to all Neutron resources that have Neutron standard attributes.                                                    |
| Neutron Service Type Management                                                              | service-type              | API for retrieving service providers for Neutron advanced services                                                                                       |
| Tag support for resources: subnet, subnetpool, port, router                                  | tag-ext                   | Extends tag support to more L2 and L3 resources.                                                                                                         |
| Neutron Extra DHCP options                                                                   | extra_dhcp_opt            | Extra options configuration for DHCP. For example PXE boot options to DHCP clients can be specified (e.g. tftp-server, server-ip-address, bootfile-name) |
| Resource revision numbers                                                                    | standard-attr-revisions   | This extension will display the revision number of neutron resources.                                                                                    |
| Pagination support                                                                           | pagination                | Extension that indicates that pagination is enabled.                                                                                                     |
| Sorting support                                                                              | sorting                   | Extension that indicates that sorting is enabled.                                                                                                        |
| security-group                                                                               | security-group            | The security groups extension.                                                                                                                           |
| RBAC Policies                                                                                | rbac-policies             | Allows creation and modification of policies that control tenant access to resources.                                                                    |
| standard-attr-description                                                                    | standard-attr-description | Extension to add descriptions to standard attributes                                                                                                     |
| Port Security                                                                                | port-security             | Provides port security                                                                                                                                   |
| Allowed Address Pairs                                                                        | allowed-address-pairs     | Provides allowed address pairs                                                                                                                           |
| project_id field enabled                                                                     | project-id                | Extension that indicates that project_id field is enabled.                                                                                               |
+----------------------------------------------------------------------------------------------+---------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
 
 
Neutron compute node configuration: openstack2

Neutron compute node (copy the neutron configuration files from the controller to the compute node):

Edit the /etc/neutron/neutron.conf file and complete the following actions:

In the [database] section, comment out any connection options, because compute nodes do not directly access the database.

In the [DEFAULT] section, configure RabbitMQ message queue access:
 
[DEFAULT]
...
transport_url = rabbit://openstack:openstack@10.88.66.15
 
In the [DEFAULT] and [keystone_authtoken] sections, configure Identity service access:
 
[DEFAULT]
...
auth_strategy = keystone
 
[keystone_authtoken]
...
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
 
In the [oslo_concurrency] section, configure the lock path:
 
[oslo_concurrency]
...
lock_path = /var/lib/neutron/tmp
 
 
# grep -v '^#\|^$' /etc/neutron/neutron.conf
[DEFAULT]
auth_strategy = keystone
transport_url = rabbit://openstack:openstack@10.88.66.15
[agent]
[cors]
[database]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000
auth_url = http://10.88.66.15:35357
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[quotas]
[ssl]
Configure networking options
Choose the same networking option that you chose for the controller node. Afterwards, return here and proceed to the next step: configure the Compute service to use the Networking service.
 
Configure the Linux bridge agent
The Linux bridge agent builds layer-2 (bridging and switching) virtual networking infrastructure for instances and handles security groups.

Edit the /etc/neutron/plugins/ml2/linuxbridge_agent.ini file and complete the following actions:

In the [linux_bridge] section, map the provider virtual networks to the provider physical network interfaces:
 
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
Replace PROVIDER_INTERFACE_NAME with the name of the underlying provider physical network interface (eth1 and eth2 here); see the host networking section for more information.
 
In the [vxlan] section, disable VXLAN overlay networking:
 
[vxlan]
enable_vxlan = false
In the [securitygroup] section, enable security groups and configure the Linux bridge iptables firewall driver:
 
[securitygroup]
...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
 
 
[root@openstack2 ~]# grep -v "^#\|^$" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = true
[vxlan]
enable_vxlan = false
 
Configure the Compute service to use the Networking service
Edit the /etc/nova/nova.conf file and complete the following actions:

In the [neutron] section, configure access parameters:
 
[neutron]
...
url = http://10.88.66.15:9696
auth_url = http://10.88.66.15:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
 
[root@openstack2 ~]#  grep -v "^#\|^$"  /etc/nova/nova.conf
[DEFAULT]
auth_strategy = keystone
use_neutron = True
compute_driver = libvirt.LibvirtDriver
firewall_driver=nova.virt.firewall.NoopFirewallDriver
[api]
[api_database]
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[console]
[consoleauth]
[cors]
[cors.subdomain]
[crypto]
[database]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://10.88.66.15:9292
[guestfs]
[healthcheck]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://10.88.66.15:5000/v3
auth_url = http://10.88.66.15:35357/v3
memcached_servers = 10.88.66.15:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
[libvirt]
live_migration_bandwidth = 0
live_migration_uri = qemu+ssh://stack@%s/system
virt_type = kvm
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://10.88.66.15:9696
auth_url = http://10.88.66.15:35357/v3
service_metadata_proxy = True
metadata_proxy_shared_secret = syscloud.cn
region_name = RegionOne
auth_strategy = keystone
project_domain_name = Default
project_name = service
user_domain_name = Default
password = neutron
username = neutron
auth_type = password
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
driver = messagingv2
[oslo_messaging_rabbit]
rabbit_host = 10.88.66.15
rabbit_userid = openstack
rabbit_password = openstack
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[libvirt]
inject_password = true
inject_partition = -1
live_migration_bandwidth = 0
live_migration_uri = qemu+ssh://stack@%s/system
virt_type = kvm
[placement]
os_region_name = RegionOne
project_domain_name = default
project_name = service
user_domain_name = default
password = placement
username = placement
auth_url = http://10.88.66.15:35357/v3
auth_type = password
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
[service_user]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.88.66.16
novncproxy_base_url = http://10.88.66.15:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
 
 
 
Finish installation
Restart the Compute service:

[root@openstack2 ~]# systemctl restart openstack-nova-compute.service
Start the Linux bridge agent and configure it to start when the system boots:
 
[root@openstack2 ~]# systemctl enable neutron-linuxbridge-agent.service
[root@openstack2 ~]# systemctl start neutron-linuxbridge-agent.service
 
Verify that neutron is working on the compute node:
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 36134331-0c29-4eaa-b287-93e69836d419 | DHCP agent         | openstack1 | nova              | :-)   | UP    | neutron-dhcp-agent        |
| 67b10d2b-2438-40e1-8402-70219cd5100c | Metadata agent     | openstack1 | None              | :-)   | UP    | neutron-metadata-agent    |
| 6e40171c-6be3-49a7-93d0-ee54ce831025 | Linux bridge agent | openstack1 | None              | :-)   | UP    | neutron-linuxbridge-agent |
| 7fbb4072-6358-4cf6-8b6e-9631bb0c9eac | L3 agent           | openstack1 | nova              | :-)   | UP    | neutron-l3-agent          |
| c5fbf4e0-0d72-40b0-bb53-c383883a0d19 | Linux bridge agent | openstack2 | None              | :-)   | UP    | neutron-linuxbridge-agent |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
 
This indicates that the Linux bridge agent on the compute node has successfully connected to the controller node.
 
 
----------------------------------------------------------------------------------------------------------------
 
 
 
Chapter 7  The OpenStack dashboard service: Horizon

Install the package:

# yum install openstack-dashboard -y
Edit the /etc/openstack-dashboard/local_settings file and complete the following actions:

Configure the dashboard to use OpenStack services on the controller node:

OPENSTACK_HOST = "10.88.66.15"
Allow all hosts to access the dashboard:

ALLOWED_HOSTS = ['*', ]
Configure the session storage service (file-based sessions are used here; the memcached SESSION_ENGINE line is commented out, see the troubleshooting section at the end):
 
#SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': '10.88.66.15:11211',
    }
}
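Because CACHES points Django at memcached on the controller, it helps to confirm the daemon is reachable before restarting httpd; an unreachable cache is exactly what causes the "Unable to create a new session key" failure described in the troubleshooting section. A minimal sketch, assuming the nc utility is installed:

# ask memcached for its stats; any reply means the cache is reachable
echo stats | timeout 2 nc 10.88.66.15 11211 | head -5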
 
 
 
Enable the Identity API version 3:
 
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
Enable support for domains:
 
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
Configure the API versions:
 
OPENSTACK_API_VERSIONS = {
     "identity": 3,
     "volume": 2,
     "image": 2,
     "compute": 2,
}
Configure default as the default domain for users created via the dashboard:
 
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
Configure user as the default role for users created via the dashboard:
 
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
If you chose networking option 1, disable support for layer-3 networking services; this deployment keeps all of them enabled:
 
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    'enable_quotas': True,
    'enable_ipv6': True,
    'enable_distributed_router': True,
    'enable_ha_router': True,
    'enable_lb': True,
    'enable_firewall': True,
    'enable_vpn': True,
    'enable_fip_topology_check': True,
}
Optionally, configure the time zone:
 
TIME_ZONE = "Asia/Shanghai"
 
 
 
 
Final configuration for reference:
# grep -v '#\|^$' /etc/openstack-dashboard/local_settings
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.settings import HORIZON_CONFIG
DEBUG = False
WEBROOT = '/dashboard/'
ALLOWED_HOSTS = ['*', ]
OPENSTACK_API_VERSIONS = {
     "identity": 3,
     "volume": 2,
     "image": 2,
     "compute": 2,
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
LOCAL_PATH = '/tmp'
SECRET_KEY='3f508e8a4399dffa3323'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '10.88.66.15:11211',
    },
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OPENSTACK_HOST = "0.0.0.0"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_KEYSTONE_BACKEND = {
    'name': 'native',
    'can_edit_user': True,
    'can_edit_group': True,
    'can_edit_project': True,
    'can_edit_domain': True,
    'can_edit_role': True,
}
OPENSTACK_HYPERVISOR_FEATURES = {
    'can_set_mount_point': False,
    'can_set_password': True,
    'requires_keypair': False,
    'enable_quotas': True
}
OPENSTACK_CINDER_FEATURES = {
    'enable_backup': True,
}
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    'enable_quotas': True,
    'enable_ipv6': True,
    'enable_distributed_router': True,
    'enable_ha_router': True,
    'enable_lb': True,
    'enable_firewall': True,
    'enable_vpn': True,
    'enable_fip_topology_check': True,
}
OPENSTACK_HEAT_STACK = {
    'enable_user_pass': True,
}
IMAGE_CUSTOM_PROPERTY_TITLES = {
    "architecture": _("Architecture"),
    "kernel_id": _("Kernel ID"),
    "ramdisk_id": _("Ramdisk ID"),
    "image_state": _("Euca2ools state"),
    "project_id": _("Project ID"),
    "image_type": _("Image Type"),
}
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
INSTANCE_LOG_LENGTH = 35
DROPDOWN_MAX_ITEMS = 30
TIME_ZONE = "Asia/Shanghai"
POLICY_FILES_PATH = '/etc/openstack-dashboard'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'operation': {
            'format': '%(asctime)s %(message)s'
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
        },
        'operation': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'operation',
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['null'],
            'propagate': False,
        },
        'requests': {
            'handlers': ['null'],
            'propagate': False,
        },
        'horizon': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'horizon.operation_log': {
            'handlers': ['operation'],
            'level': 'INFO',
            'propagate': False,
        },
        'openstack_dashboard': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'novaclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'cinderclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'keystoneclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'glanceclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'neutronclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'heatclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'swiftclient': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'openstack_auth': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'nose.plugins.manager': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'django': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'iso8601': {
            'handlers': ['null'],
            'propagate': False,
        },
        'scss': {
            'handlers': ['null'],
            'propagate': False,
        },
    },
}
SECURITY_GROUP_RULES = {
    'all_tcp': {
        'name': _('All TCP'),
        'ip_protocol': 'tcp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_udp': {
        'name': _('All UDP'),
        'ip_protocol': 'udp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_icmp': {
        'name': _('All ICMP'),
        'ip_protocol': 'icmp',
        'from_port': '-1',
        'to_port': '-1',
    },
    'ssh': {
        'name': 'SSH',
        'ip_protocol': 'tcp',
        'from_port': '22',
        'to_port': '22',
    },
    'smtp': {
        'name': 'SMTP',
        'ip_protocol': 'tcp',
        'from_port': '25',
        'to_port': '25',
    },
    'dns': {
        'name': 'DNS',
        'ip_protocol': 'tcp',
        'from_port': '53',
        'to_port': '53',
    },
    'http': {
        'name': 'HTTP',
        'ip_protocol': 'tcp',
        'from_port': '80',
        'to_port': '80',
    },
    'pop3': {
        'name': 'POP3',
        'ip_protocol': 'tcp',
        'from_port': '110',
        'to_port': '110',
    },
    'imap': {
        'name': 'IMAP',
        'ip_protocol': 'tcp',
        'from_port': '143',
        'to_port': '143',
    },
    'ldap': {
        'name': 'LDAP',
        'ip_protocol': 'tcp',
        'from_port': '389',
        'to_port': '389',
    },
    'https': {
        'name': 'HTTPS',
        'ip_protocol': 'tcp',
        'from_port': '443',
        'to_port': '443',
    },
    'smtps': {
        'name': 'SMTPS',
        'ip_protocol': 'tcp',
        'from_port': '465',
        'to_port': '465',
    },
    'imaps': {
        'name': 'IMAPS',
        'ip_protocol': 'tcp',
        'from_port': '993',
        'to_port': '993',
    },
    'pop3s': {
        'name': 'POP3S',
        'ip_protocol': 'tcp',
        'from_port': '995',
        'to_port': '995',
    },
    'ms_sql': {
        'name': 'MS SQL',
        'ip_protocol': 'tcp',
        'from_port': '1433',
        'to_port': '1433',
    },
    'mysql': {
        'name': 'MYSQL',
        'ip_protocol': 'tcp',
        'from_port': '3306',
        'to_port': '3306',
    },
    'rdp': {
        'name': 'RDP',
        'ip_protocol': 'tcp',
        'from_port': '3389',
        'to_port': '3389',
    },
}
REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
                              'LAUNCH_INSTANCE_DEFAULTS',
                              'OPENSTACK_IMAGE_FORMATS',
                              'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN']
ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
 
Finish installation
Restart the web server and session storage service:

[root@openstack1 ~]# systemctl restart httpd.service memcached.service

Verify operation of the dashboard.

Access the dashboard in a browser at http://10.88.66.15/dashboard or http://43.239.121.156/dashboard/.

Authenticate using the admin or demo user credentials and the default domain.
 
 
 
Chapter 8  Creating the first OpenStack instance

Create the first instance

1. Create a network:
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack network create  --share --external --provider-physical-network provider --provider-network-type flat provider
[root@openstack1 ~]# openstack network create  --share --external --provider-physical-network public --provider-network-type flat public-net
 
 
[root@openstack1 ~]# openstack network list
+--------------------------------------+----------+--------------------------------------+
| ID                                   | Name     | Subnets                              |
+--------------------------------------+----------+--------------------------------------+
| 76ed28c8-4afd-41b7-ab9b-17cd8afa436e | provider | 22436418-64e1-47b8-aa48-01a0162a9ad9 |
+--------------------------------------+----------+--------------------------------------+
Create a subnet:
[root@openstack1 ~]# openstack subnet create --network provider --allocation-pool start=192.168.2.101,end=192.168.2.250 --dns-nameserver 114.114.114.114 --gateway 192.168.2.1 --subnet-range 192.168.2.0/24 provider
[root@openstack1 ~]# openstack subnet create --network public-net --allocation-pool start=192.168.56.20,end=192.168.56.200 --dns-nameserver 223.5.5.5 --gateway 192.168.56.1  --subnet-range 192.168.56.0/24 public-net
 
 
[root@openstack1 ~]# openstack subnet  list   ----list the networks and subnets
+--------------------------------------+----------+--------------------------------------+----------------+
| ID                                   | Name     | Network                              | Subnet         |
+--------------------------------------+----------+--------------------------------------+----------------+
| 22436418-64e1-47b8-aa48-01a0162a9ad9 | provider | 76ed28c8-4afd-41b7-ab9b-17cd8afa436e | 192.168.2.0/24 |
+--------------------------------------+----------+--------------------------------------+----------------+
 
[root@openstack1 ~]# openstack subnet  show 22436418-64e1-47b8-aa48-01a0162a9ad9
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.2.101-192.168.2.250          |
| cidr                    | 192.168.2.0/24                       |
| created_at              | 2019-02-19T03:15:34Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.2.1                          |
| host_routes             |                                      |
| id                      | 22436418-64e1-47b8-aa48-01a0162a9ad9 |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | provider                             |
| network_id              | 76ed28c8-4afd-41b7-ab9b-17cd8afa436e |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T03:15:34Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
 
Create the m1.nano flavor
The smallest default flavor consumes 512 MB of memory per instance. For environments with compute nodes containing less than 4 GB of memory, we recommend creating an m1.nano flavor that only requires 64 MB per instance. Use this flavor only with the CirrOS image, for testing purposes.
 
[root@openstack1 ~]# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
+----------------------------+---------+
| Field                      | Value   |
+----------------------------+---------+
| OS-FLV-DISABLED:disabled   | False   |
| OS-FLV-EXT-DATA:ephemeral  | 0       |
| disk                       | 1       |
| id                         | 0       |
| name                       | m1.nano |
| os-flavor-access:is_public | True    |
| properties                 |         |
| ram                        | 64      |
| rxtx_factor                | 1.0     |
| swap                       |         |
| vcpus                      | 1       |
+----------------------------+---------+
 
 
 
Generate a key pair


Source the demo project credentials:
[root@openstack1 ~]# source demo-openstack.sh

Generate and add a key pair:
[root@openstack1 ~]# ssh-keygen -q -N ""
Enter file in which to save the key (/root/.ssh/id_rsa):
[root@openstack1 ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
+-------------+-------------------------------------------------+
| Field       | Value                                           |
+-------------+-------------------------------------------------+
| fingerprint | 81:0d:c4:d2:7f:ea:ae:47:fd:c9:70:d4:98:cf:9e:cd |
| name        | mykey                                           |
| user_id     | 875898ea26d742e58161f248fd954752                |
+-------------+-------------------------------------------------+
 
[root@openstack1 ~]# openstack keypair list
+-------+-------------------------------------------------+
| Name  | Fingerprint                                     |
+-------+-------------------------------------------------+
| mykey | 81:0d:c4:d2:7f:ea:ae:47:fd:c9:70:d4:98:cf:9e:cd |
+-------+-------------------------------------------------+
 
 
Add security group rules

Add rules to the default security group.

Permit ICMP (ping):
 
[root@openstack1 ~]# openstack security group rule create --protocol icmp default
+-------------------+--------------------------------------+
| Field             | Value                                |
+-------------------+--------------------------------------+
| created_at        | 2018-03-06T04:35:19Z                 |
| description       |                                      |
| direction         | ingress                              |
| ethertype         | IPv4                                 |
| headers           |                                      |
| id                | 2ef58ebf-b582-4f99-9936-184c40abe5fc |
| port_range_max    | None                                 |
| port_range_min    | None                                 |
| project_id        | e0c3f7cb756c4ab79cf92fc99d8f073b     |
| project_id        | e0c3f7cb756c4ab79cf92fc99d8f073b     |
| protocol          | icmp                                 |
| remote_group_id   | None                                 |
| remote_ip_prefix  | 0.0.0.0/0                            |
| revision_number   | 1                                    |
| security_group_id | 7e05baa2-3b19-42c5-85f6-c9f215ca28ce |
| updated_at        | 2018-03-06T04:35:19Z                 |
+-------------------+--------------------------------------+
 
Permit secure shell (SSH) access:
[root@openstack1 ~]# openstack security group rule create --protocol tcp --dst-port 22 default
+-------------------+--------------------------------------+
| Field             | Value                                |
+-------------------+--------------------------------------+
| created_at        | 2018-03-06T04:36:48Z                 |
| description       |                                      |
| direction         | ingress                              |
| ethertype         | IPv4                                 |
| headers           |                                      |
| id                | 8b099d42-875a-45e6-bf2e-da3780b5453d |
| port_range_max    | 22                                   |
| port_range_min    | 22                                   |
| project_id        | e0c3f7cb756c4ab79cf92fc99d8f073b     |
| project_id        | e0c3f7cb756c4ab79cf92fc99d8f073b     |
| protocol          | tcp                                  |
| remote_group_id   | None                                 |
| remote_ip_prefix  | 0.0.0.0/0                            |
| revision_number   | 1                                    |
| security_group_id | 7e05baa2-3b19-42c5-85f6-c9f215ca28ce |
| updated_at        | 2018-03-06T04:36:48Z                 |
+-------------------+--------------------------------------+
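The rules added to the group can be reviewed at any time, which is a quick way to confirm both entries landed (a sketch):

[root@openstack1 ~]# openstack security group rule list default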
 
 
 
 
 
Determine instance options
To launch an instance, you must specify at least a flavor, an image name, a network, a security group, a key pair, and an instance name.

List available flavors:
[root@openstack1 ~]# openstack flavor list
+----+---------+-----+------+-----------+-------+-----------+
| ID | Name    | RAM | Disk | Ephemeral | VCPUs | Is Public |
+----+---------+-----+------+-----------+-------+-----------+
| 0  | m1.nano |  64 |    1 |         0 |     1 | True      |
+----+---------+-----+------+-----------+-------+-----------+
 
List available images:
 
[root@openstack1 ~]# openstack image list
+--------------------------------------+-----------------------+--------+
| ID                                   | Name                  | Status |
+--------------------------------------+-----------------------+--------+
| 64b3a04f-0d1f-4dee-8604-5f33d7769d22 | CentOS-6.9-x86_64-min | active |
| d464af77-9588-43e7-a3d4-3f5f26000030 | cirros                | active |
+--------------------------------------+-----------------------+--------+
 
 
List available networks:
 
[root@openstack1 ~]# openstack network list
+--------------------------------------+------------+--------------------------------------+
| ID                                   | Name       | Subnets                              |
+--------------------------------------+------------+--------------------------------------+
| cab15ce1-cf69-4fbd-ba1f-88f525e98e0f | public-net | d632d023-1911-4ad3-b806-1bf8a7089771 |
+--------------------------------------+------------+--------------------------------------+
 
 
List available security groups:
 
[root@openstack1 ~]# openstack security group list
+--------------------------------------+---------+------------------------+----------------------------------+
| ID                                   | Name    | Description            | Project                          |
+--------------------------------------+---------+------------------------+----------------------------------+
| 7e05baa2-3b19-42c5-85f6-c9f215ca28ce | default | Default security group | e0c3f7cb756c4ab79cf92fc99d8f073b |
+--------------------------------------+---------+------------------------+----------------------------------+
 
 
Create the instance:
[root@openstack1 ~]# openstack server create --flavor m1.nano --image cirros --nic net-id=cab15ce1-cf69-4fbd-ba1f-88f525e98e0f --security-group default --key-name mykey provider-instance
+--------------------------------------+-----------------------------------------------+
| Field                                | Value                                         |
+--------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                        |
| OS-EXT-AZ:availability_zone          |                                               |
| OS-EXT-STS:power_state               | NOSTATE                                       |
| OS-EXT-STS:task_state                | scheduling                                    |
| OS-EXT-STS:vm_state                  | building                                      |
| OS-SRV-USG:launched_at               | None                                          |
| OS-SRV-USG:terminated_at             | None                                          |
| accessIPv4                           |                                               |
| accessIPv6                           |                                               |
| addresses                            |                                               |
| adminPass                            | PLftyqaoyC86                                  |
| config_drive                         |                                               |
| created                              | 2018-03-06T05:02:50Z                          |
| flavor                               | m1.nano (0)                                   |
| hostId                               |                                               |
| id                                   | d8517ee2-4b40-4128-b087-cd76efcdd78c          |
| image                                | cirros (d464af77-9588-43e7-a3d4-3f5f26000030) |
| key_name                             | mykey                                         |
| name                                 | provider-instance                             |
| os-extended-volumes:volumes_attached | []                                            |
| progress                             | 0                                             |
| project_id                           | e0c3f7cb756c4ab79cf92fc99d8f073b              |
| properties                           |                                               |
| security_groups                      | [{u'name': u'default'}]                       |
| status                               | BUILD                                         |
| updated                              | 2018-03-06T05:02:51Z                          |
| user_id                              | 875898ea26d742e58161f248fd954752              |
+--------------------------------------+-----------------------------------------------+
Check the status of the instance:
 
[root@openstack1 ~]# openstack server list
+--------------------------------------+-------------------+--------+--------------------------+------------+
| ID                                   | Name              | Status | Networks                 | Image Name |
+--------------------------------------+-------------------+--------+--------------------------+------------+
| 94d530ed-5baf-41a1-bd6a-323d55abe65b | provider-instance | ACTIVE | public-net=192.168.56.24 | cirros     |
+--------------------------------------+-------------------+--------+--------------------------+------------+
 
[root@openstack1 ~]# ssh cirros@192.168.56.23
$ ifconfig
eth0      Link encap:Ethernet  HWaddr FA:16:3E:54:44:41 
          inet addr:192.168.56.23  Bcast:192.168.56.255  Mask:255.255.255.0
          inet6 addr: fe80::f816:3eff:fe54:4441/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:3207 errors:0 dropped:0 overruns:0 frame:0
          TX packets:148 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:198632 (193.9 KiB)  TX bytes:14166 (13.8 KiB)
 
lo        Link encap:Local Loopback 
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:16436  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
 
$ whoami
cirros
 
Default credentials of the cirros image:
user: cirros
password: cubswin:)

The instance was created successfully and can be logged in to.
 
----------------------------------------------------------------------------------------------------
 
Get the noVNC console URL of the instance with a command:
 
[root@openstack1 ~]# nova get-vnc-console provider-instance novnc
+-------+------------------------------------------------------------------------------------+
| Type  | Url                                                                                |
+-------+------------------------------------------------------------------------------------+
| novnc | http://10.88.66.15:6080/vnc_auto.html?token=f79e53d8-1c2d-460d-b1fa-694ee0102f40 |
+-------+------------------------------------------------------------------------------------+
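The unified openstack client offers an equivalent command (a sketch, assuming the same server name):

[root@openstack1 ~]# openstack console url show --novnc provider-instance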
 
----------------------------------------------------------------------------------------------------
 
Enter http://10.88.66.15:6080/vnc_auto.html?token=f79e53d8-1c2d-460d-b1fa-694ee0102f40 in a browser

to log in to the instance console.
 
Troubleshooting:
1. Error accessing Horizon after installing the dashboard

Accessing http://10.88.66.15/dashboard returns 500: internal server error

Solution:
Edit /etc/httpd/conf.d/openstack-dashboard.conf
and add one line below WSGISocketPrefix run/wsgi:
WSGIApplicationGroup %{GLOBAL}
Save and restart the httpd service.
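After the edit, the relevant part of /etc/httpd/conf.d/openstack-dashboard.conf should look roughly like this (a sketch; the rest of the file stays as shipped):

WSGISocketPrefix run/wsgi
WSGIApplicationGroup %{GLOBAL}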
 
2. The instance console hangs at: Booting from Hard Disk

Solution: configure libvirt on the compute node to use QEMU instead of KVM:

[root@openstack2 ~]# vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu
cpu_mode=none
Restart the related services:
[root@openstack2 ~]# systemctl restart libvirtd.service openstack-nova-compute.service
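Background: inside a virtual compute node, virt_type = kvm only works when the hypervisor exposes VT-x/AMD-V to the guest (nested virtualization). A quick check from the compute node (a sketch):

# a non-zero count means hardware virtualization is visible and kvm would work;
# 0 means libvirt must fall back to plain qemu emulation
egrep -c '(vmx|svm)' /proc/cpuinfo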
 
3. Unable to log in to the dashboard

Entering http://controller/dashboard in a browser fails to log in with the message:

"Something went wrong! An unexpected error has occurred. Try refreshing the page. If that doesn't help, contact your local administrator."
# tail -n 40 /var/log/apache2/error.log
[Wed Mar 15 22:56:22.744149 2017] [:error] [pid 2733] Login successful for user "admin".
[Wed Mar 15 22:56:34.220718 2017] [:error] [pid 2733] Internal Server Error: /dashboard/auth/login/
[Wed Mar 15 22:56:34.220920 2017] [:error] [pid 2733] Traceback (most recent call last):
[Wed Mar 15 22:56:34.220935 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/core/handlers/base.py", line 132, in get_response
[Wed Mar 15 22:56:34.220943 2017] [:error] [pid 2733]     response = wrapped_callback(request, *callback_args, **callback_kwargs)
[Wed Mar 15 22:56:34.220951 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/views/decorators/debug.py", line 76, in sensitive_post_parameters_wrapper
[Wed Mar 15 22:56:34.220959 2017] [:error] [pid 2733]     return view(request, *args, **kwargs)
[Wed Mar 15 22:56:34.220966 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/utils/decorators.py", line 110, in _wrapped_view
[Wed Mar 15 22:56:34.220973 2017] [:error] [pid 2733]     response = view_func(request, *args, **kwargs)
[Wed Mar 15 22:56:34.220981 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/views/decorators/cache.py", line 57, in _wrapped_view_func
[Wed Mar 15 22:56:34.221043 2017] [:error] [pid 2733]     response = view_func(request, *args, **kwargs)
[Wed Mar 15 22:56:34.221052 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/openstack_auth/views.py", line 103, in login
[Wed Mar 15 22:56:34.221059 2017] [:error] [pid 2733]     **kwargs)
[Wed Mar 15 22:56:34.221067 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/views/decorators/debug.py", line 76, in sensitive_post_parameters_wrapper
[Wed Mar 15 22:56:34.221075 2017] [:error] [pid 2733]     return view(request, *args, **kwargs)
[Wed Mar 15 22:56:34.221082 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/utils/decorators.py", line 110, in _wrapped_view
[Wed Mar 15 22:56:34.221089 2017] [:error] [pid 2733]     response = view_func(request, *args, **kwargs)
[Wed Mar 15 22:56:34.221095 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/views/decorators/cache.py", line 57, in _wrapped_view_func
[Wed Mar 15 22:56:34.221102 2017] [:error] [pid 2733]     response = view_func(request, *args, **kwargs)
[Wed Mar 15 22:56:34.221109 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/contrib/auth/views.py", line 51, in login
[Wed Mar 15 22:56:34.221179 2017] [:error] [pid 2733]     auth_login(request, form.get_user())
[Wed Mar 15 22:56:34.221206 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/contrib/auth/__init__.py", line 110, in login
[Wed Mar 15 22:56:34.221214 2017] [:error] [pid 2733]     request.session.cycle_key()
[Wed Mar 15 22:56:34.221221 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/contrib/sessions/backends/base.py", line 285, in cycle_key
[Wed Mar 15 22:56:34.221228 2017] [:error] [pid 2733]     self.create()
[Wed Mar 15 22:56:34.221269 2017] [:error] [pid 2733]   File "/usr/lib/python2.7/site-packages/django/contrib/sessions/backends/cache.py", line 48, in create
[Wed Mar 15 22:56:34.221281 2017] [:error] [pid 2733]     "Unable to create a new session key. "
[Wed Mar 15 22:56:34.221288 2017] [:error] [pid 2733] RuntimeError: Unable to create a new session key. It is likely that the cache is unavailable.
[root@openstack1 ~]# tail -f /var/log/httpd/error_log
(the same traceback as above is repeated here)
 
 
Solution: change the SESSION_ENGINE setting in the dashboard's local_settings file and restart httpd and memcached; after refreshing, login works normally. (This fix was found on a foreign forum.)
 
# vim /etc/openstack-dashboard/local_settings
#SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
# systemctl restart httpd memcached
# systemctl status httpd memcached
 
 
 
Extension: the most commonly used VLAN network model with the Linuxbridge driver
This builds on the network configuration above.
Edit /etc/neutron/plugins/ml2/ml2_conf.ini:
[ml2_type_vlan]
...
network_vlan_ranges = default:1:4000,external:1:4000
[ml2]
...
tenant_network_types = vlan
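With tenant_network_types = vlan, ordinary tenant networks are assigned a segmentation ID from the same network_vlan_ranges pool automatically. A quick way to watch the auto-allocation (a sketch; the network name is hypothetical, and only admin can see the provider attributes):

[root@openstack1 ~]# source demo-openstack.sh
[root@openstack1 ~]# openstack network create demo-vlan-net
[root@openstack1 ~]# source admin-openstack.sh
[root@openstack1 ~]# openstack network show demo-vlan-net -c provider:segmentation_id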
 
 
[root@openstack1 ~]# grep -v '^#\|^$' /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[l2pop]
[ml2]
type_drivers = flat,vlan,gre,vxlan,geneve
tenant_network_types = vlan
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = external
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
network_vlan_ranges = default:1:4000,external:1:4000
[ml2_type_vxlan]
[securitygroup]
enable_ipset = true
 
 
[root@openstack1 ~]# grep -v '^#\|^$' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = true
[vxlan]
enable_vxlan = false
 
[root@openstack2 ~]# grep -v '^#\|^$' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = default:eth1,external:eth2
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = true
[vxlan]
enable_vxlan = false
 
Restart the networking services on the controller node and the compute node:
systemctl restart neutron-server neutron-metadata-agent neutron-linuxbridge-agent neutron-l3-agent neutron-dhcp-agent
[root@openstack2 ~]# systemctl restart neutron-linuxbridge-agent.service
 
Create a VLAN-based network:
[root@openstack1 ~]# openstack network create  --share --external --provider-physical-network default --provider-network-type vlan vlan99
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | UP                                   |
| availability_zone_hints   |                                      |
| availability_zones        |                                      |
| created_at                | 2019-02-19T06:58:07Z                 |
| description               |                                      |
| dns_domain                | None                                 |
| id                        | 4c530c4c-6bb5-4927-9983-ea58aba82b42 |
| ipv4_address_scope        | None                                 |
| ipv6_address_scope        | None                                 |
| is_default                | False                                |
| is_vlan_transparent       | None                                 |
| mtu                       | 1500                                 |
| name                      | vlan99                               |
| port_security_enabled     | True                                 |
| project_id                | 6840d3aa8b814d9caa54432ce44471b6     |
| provider:network_type     | vlan                                 |
| provider:physical_network | default                              |
| provider:segmentation_id  | 149                                  |
| qos_policy_id             | None                                 |
| revision_number           | 3                                    |
| router:external           | External                             |
| segments                  | None                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tags                      |                                      |
| updated_at                | 2019-02-19T06:58:07Z                 |
+---------------------------+--------------------------------------+
 
[root@openstack1 ~]# openstack subnet create --network vlan99 --allocation-pool start=192.168.99.101,end=192.168.99.250 --dns-nameserver 114.114.114.114 --gateway 192.168.99.1 --subnet-range 192.168.99.0/24 vlan99-sub
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.99.101-192.168.99.250        |
| cidr                    | 192.168.99.0/24                      |
| created_at              | 2019-02-19T07:01:34Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.99.1                         |
| host_routes             |                                      |
| id                      | c31248d4-31a9-4c54-b26b-23c2ba8be758 |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | vlan99-sub                           |
| network_id              | 4c530c4c-6bb5-4927-9983-ea58aba82b42 |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T07:01:34Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
 
 
[root@openstack1 ~]# openstack network list
+--------------------------------------+--------+--------------------------------------+
| ID                                   | Name   | Subnets                              |
+--------------------------------------+--------+--------------------------------------+
| 4c530c4c-6bb5-4927-9983-ea58aba82b42 | vlan99 | c31248d4-31a9-4c54-b26b-23c2ba8be758 |
+--------------------------------------+--------+--------------------------------------+
 
 
[root@openstack1 ~]# openstack network show 4c530c4c-6bb5-4927-9983-ea58aba82b42
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | UP                                   |
| availability_zone_hints   |                                      |
| availability_zones        | nova                                 |
| created_at                | 2019-02-19T06:58:07Z                 |
| description               |                                      |
| dns_domain                | None                                 |
| id                        | 4c530c4c-6bb5-4927-9983-ea58aba82b42 |
| ipv4_address_scope        | None                                 |
| ipv6_address_scope        | None                                 |
| is_default                | False                                |
| is_vlan_transparent       | None                                 |
| mtu                       | 1500                                 |
| name                      | vlan99                               |
| port_security_enabled     | True                                 |
| project_id                | 6840d3aa8b814d9caa54432ce44471b6     |
| provider:network_type     | vlan                                 |
| provider:physical_network | default                              |
| provider:segmentation_id  | 149                                  |
| qos_policy_id             | None                                 |
| revision_number           | 4                                    |
| router:external           | External                             |
| segments                  | None                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   | c31248d4-31a9-4c54-b26b-23c2ba8be758 |
| tags                      |                                      |
| updated_at                | 2019-02-19T07:01:34Z                 |
+---------------------------+--------------------------------------+
 
 
[root@openstack1 ~]# openstack subnet list
+--------------------------------------+------------+--------------------------------------+-----------------+
| ID                                   | Name       | Network                              | Subnet          |
+--------------------------------------+------------+--------------------------------------+-----------------+
| c31248d4-31a9-4c54-b26b-23c2ba8be758 | vlan99-sub | 4c530c4c-6bb5-4927-9983-ea58aba82b42 | 192.168.99.0/24 |
+--------------------------------------+------------+--------------------------------------+-----------------+
 
 
[root@openstack1 ~]# openstack subnet show c31248d4-31a9-4c54-b26b-23c2ba8be758
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.99.101-192.168.99.250        |
| cidr                    | 192.168.99.0/24                      |
| created_at              | 2019-02-19T07:01:34Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.99.1                         |
| host_routes             |                                      |
| id                      | c31248d4-31a9-4c54-b26b-23c2ba8be758 |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | vlan99-sub                           |
| network_id              | 4c530c4c-6bb5-4927-9983-ea58aba82b42 |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T07:01:34Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
 
Ha, this is not the VLAN 99 I wanted. Because the network was created without an explicit provider segment, Neutron auto-allocated segmentation ID 149 from the ml2 tenant VLAN range, so the Linux bridge sits on eth1.149 instead of eth1.99:
[root@openstack1 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540019041e    no        eth1.149
                            tap715df6be-b8
[root@openstack2 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540030bc71    no        eth1.149
                            tap8e5772bb-66
[root@openstack2 ~]# virsh list
Id    Name                           State
----------------------------------------------------
3     instance-0000000a              running
 
[root@openstack2 ~]# virsh domiflist instance-0000000a
Interface  Type       Source     Model       MAC
-------------------------------------------------------
tap8e5772bb-66 bridge     brq4c530c4c-6b virtio      fa:16:3e:20:a8:72
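
To confirm what happened, we can read the allocated segment straight back from Neutron and compare it with the tenant VLAN pool configured for the ml2 plugin. A quick check, assuming the stock ml2_conf.ini path from the install guide:

# Show only the VLAN ID Neutron actually allocated for this network
openstack network show vlan99 -c provider:segmentation_id -f value
# Compare it against the tenant VLAN range configured for ml2;
# 149 falls inside that pool, which is why the bridge uses eth1.149
grep network_vlan_ranges /etc/neutron/plugins/ml2/ml2_conf.ini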
 
 
Let's try creating a vlan100 network from the web UI (dashboard) instead.
 
[root@openstack1 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540019041e    no        eth1.149
                            tap715df6be-b8
brq5e0a1e0d-17        8000.52540019041e    no        eth1.100
                            tape498ff08-59
This time it's right: the new bridge carries eth1.100, which matches the requested VLAN 100.
 
[root@openstack2 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540030bc71    no        eth1.149
                            tap8e5772bb-66
brq5e0a1e0d-17        8000.52540030bc71    no        eth1.100
                            tap2d0a57c6-cb
 
[root@openstack2 ~]# virsh list --all
Id    Name                           State
----------------------------------------------------
3     instance-0000000a              running
5     instance-0000000c              running
 
[root@openstack2 ~]# virsh domiflist instance-0000000c
Interface  Type       Source     Model       MAC
-------------------------------------------------------
tap2d0a57c6-cb bridge     brq5e0a1e0d-17 virtio      fa:16:3e:c8:14:8a
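
Side note: the tap device name embeds the first 11 characters of the Neutron port UUID, so a VM interface can always be mapped back to its port:

# tap2d0a57c6-cb corresponds to the port whose UUID starts with 2d0a57c6-cb
openstack port list | grep 2d0a57c6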
 
[root@openstack1 ~]# openstack network show 5e0a1e0d-17b8-4c27-a0f9-650c298a9ecd
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | UP                                   |
| availability_zone_hints   |                                      |
| availability_zones        | nova                                 |
| created_at                | 2019-02-19T07:21:35Z                 |
| description               |                                      |
| dns_domain                | None                                 |
| id                        | 5e0a1e0d-17b8-4c27-a0f9-650c298a9ecd |
| ipv4_address_scope        | None                                 |
| ipv6_address_scope        | None                                 |
| is_default                | False                                |
| is_vlan_transparent       | None                                 |
| mtu                       | 1500                                 |
| name                      | vlan100                              |
| port_security_enabled     | True                                 |
| project_id                | 6840d3aa8b814d9caa54432ce44471b6     |
| provider:network_type     | vlan                                 |
| provider:physical_network | default                              |
| provider:segmentation_id  | 100                                  |
| qos_policy_id             | None                                 |
| revision_number           | 4                                    |
| router:external           | External                             |
| segments                  | None                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   | 2dd900a9-f1d0-42e1-9c2d-b4a81237e29f |
| tags                      |                                      |
| updated_at                | 2019-02-19T07:23:27Z                 |
+---------------------------+--------------------------------------+
 
[root@openstack1 ~]# openstack subnet show 2dd900a9-f1d0-42e1-9c2d-b4a81237e29f
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.100.101-192.168.100.210      |
| cidr                    | 192.168.100.0/24                     |
| created_at              | 2019-02-19T07:23:27Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.100.1                        |
| host_routes             |                                      |
| id                      | 2dd900a9-f1d0-42e1-9c2d-b4a81237e29f |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | vlan-sub                             |
| network_id              | 5e0a1e0d-17b8-4c27-a0f9-650c298a9ecd |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T07:23:27Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
 
So how do we create a VLAN network from the command line? Take the commands used at the very beginning, tweak them slightly, and add the VLAN-related option --provider-segment. Let's create vlan101 as a test:
[root@openstack1 ~]# openstack network create  --share --external --provider-physical-network default --provider-network-type vlan --provider-segment 101 vlan101
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | UP                                   |
| availability_zone_hints   |                                      |
| availability_zones        |                                      |
| created_at                | 2019-02-19T07:36:26Z                 |
| description               |                                      |
| dns_domain                | None                                 |
| id                        | a0328240-60bc-4d18-8092-05a3882ee13d |
| ipv4_address_scope        | None                                 |
| ipv6_address_scope        | None                                 |
| is_default                | False                                |
| is_vlan_transparent       | None                                 |
| mtu                       | 1500                                 |
| name                      | vlan101                              |
| port_security_enabled     | True                                 |
| project_id                | 6840d3aa8b814d9caa54432ce44471b6     |
| provider:network_type     | vlan                                 |
| provider:physical_network | default                              |
| provider:segmentation_id  | 101                                  |
| qos_policy_id             | None                                 |
| revision_number           | 3                                    |
| router:external           | External                             |
| segments                  | None                                 |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tags                      |                                      |
| updated_at                | 2019-02-19T07:36:26Z                 |
+---------------------------+--------------------------------------+
[root@openstack1 ~]# openstack subnet create --network vlan101 --allocation-pool start=192.168.101.101,end=192.168.101.250 --dns-nameserver 114.114.114.114 --gateway 192.168.101.1 --subnet-range 192.168.101.0/24 vlan101-sub
+-------------------------+--------------------------------------+
| Field                   | Value                                |
+-------------------------+--------------------------------------+
| allocation_pools        | 192.168.101.101-192.168.101.250      |
| cidr                    | 192.168.101.0/24                     |
| created_at              | 2019-02-19T07:43:15Z                 |
| description             |                                      |
| dns_nameservers         | 114.114.114.114                      |
| enable_dhcp             | True                                 |
| gateway_ip              | 192.168.101.1                        |
| host_routes             |                                      |
| id                      | 7152d7e2-fc83-4597-b9f5-af58aa3de9e2 |
| ip_version              | 4                                    |
| ipv6_address_mode       | None                                 |
| ipv6_ra_mode            | None                                 |
| name                    | vlan101-sub                          |
| network_id              | a0328240-60bc-4d18-8092-05a3882ee13d |
| project_id              | 6840d3aa8b814d9caa54432ce44471b6     |
| revision_number         | 0                                    |
| segment_id              | None                                 |
| service_types           |                                      |
| subnetpool_id           | None                                 |
| tags                    |                                      |
| updated_at              | 2019-02-19T07:43:15Z                 |
| use_default_subnet_pool | None                                 |
+-------------------------+--------------------------------------+
 
[root@openstack1 ~]# brctl show
bridge name    bridge id        STP enabled    interfaces
brq4c530c4c-6b        8000.52540019041e    no        eth1.149
                            tap715df6be-b8
brq5e0a1e0d-17        8000.52540019041e    no        eth1.100
                            tape498ff08-59
brqa0328240-60        8000.023e6f041beb    no        eth1.101
                            tap8fd757d3-8d
 
[root@openstack1 ~]# ip netns list
qdhcp-a0328240-60bc-4d18-8092-05a3882ee13d (id: 2)
qdhcp-5e0a1e0d-17b8-4c27-a0f9-650c298a9ecd (id: 1)
qdhcp-4c530c4c-6bb5-4927-9983-ea58aba82b42 (id: 0)
[root@openstack1 ~]# ip netns exec qdhcp-a0328240-60bc-4d18-8092-05a3882ee13d ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ns-8fd757d3-8d@if13: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    link/ether fa:16:3e:97:1e:75 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 192.168.101.101/24 brd 192.168.101.255 scope global ns-8fd757d3-8d
       valid_lft forever preferred_lft forever
    inet 169.254.169.254/16 brd 169.254.255.255 scope global ns-8fd757d3-8d
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe97:1e75/64 scope link
       valid_lft forever preferred_lft forever
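
Each qdhcp namespace hosts the dnsmasq instance that serves DHCP for its network. A quick sanity check, assuming the ss tool from iproute (shipped with CentOS 7):

# Confirm dnsmasq is listening for DHCP (udp/67) inside the qdhcp namespace
ip netns exec qdhcp-a0328240-60bc-4d18-8092-05a3882ee13d ss -lnup | grep ':67 '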
 
 
That's it. If we swap those subnets for public address ranges, make sure the switch ports facing the servers' eth1 NICs are trunked, and point the gateway at the real public gateway, then instances on this cloud can reach the external network directly, because each instance is assigned a public address straight from its subnet.
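
As a sketch of that idea, the commands are the same ones used above; the VLAN ID 810 and the 203.0.113.0/24 range below are placeholders for whatever your upstream switch and public allocation actually use:

# Create an external VLAN network (810 is a placeholder VLAN ID)
openstack network create --share --external --provider-physical-network default --provider-network-type vlan --provider-segment 810 public-vlan810
# Attach a subnet carved from the public range, gateway pointing at the real upstream router
openstack subnet create --network public-vlan810 --allocation-pool start=203.0.113.101,end=203.0.113.250 --dns-nameserver 114.114.114.114 --gateway 203.0.113.1 --subnet-range 203.0.113.0/24 public-vlan810-sub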
 
 
Error roundup
Error: __init__() got an unexpected keyword argument 'user_domain_name'
Fix:
This usually means the credentials in the environment are wrong or stale; try clearing the auth-related variables.
Run:
unset OS_AUTH_URL OS_PASSWORD OS_TOKEN
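
After clearing the stale variables, re-source your credentials file and verify that authentication works again; ~/admin-openrc below is an assumption, substitute whatever rc file you created earlier:

# Reload the admin credentials (filename is an assumption)
source ~/admin-openrc
# A token being issued confirms authentication is working again
openstack token issue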
 

