聲明
該文檔在創建實例前都沒問題,在創建實例時報錯如下:
Expecting to find domain in user. The server could not comply with the request since it is either malformed or otherwise incorrect. The client is assumed to be in error
假設你剛好需要Train版本,該文檔可作為參考。註:上述報錯通常是因為 compute 節點 /etc/nova/nova.conf 的 [neutron] 段缺少 user_domain_name(誤寫成了重復的 project_domain_name)導致 keystone 無法確定用戶所屬的域
歡迎加入QQ群一起討論Linux、開源等技術
硬件配置
192.168.50.133(外網、橋接)/192.168.1.133(內網、自定義網絡)/4U/16G/100G/controller
192.168.50.134(外網、橋接)/192.168.1.134(內網、自定義網絡)/4U/16G/100G/compute
192.168.1.135(內網、自定義網絡)/4U/16G/100G/block
前期設置(所有節點執行)
# 設置主機名
hostnamectl set-hostname controller && reboot
hostnamectl set-hostname compute && reboot
hostnamectl set-hostname block && reboot
# 配置hosts
vim /etc/hosts
192.168.50.133 controller
192.168.50.134 compute
192.168.1.135 block # block節點只有內網地址192.168.1.135(見上方硬件配置),192.168.50.135並不存在
# 關閉防火牆和selinux
systemctl stop firewalld && systemctl disable firewalld
sed -i "s/SELINUX=enforcing/SELINUX=disabled/" /etc/selinux/config
# 安裝時間服務
controller節點配置:
yum -y install chrony
vim /etc/chrony.conf
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server ntp1.aliyun.com iburst
systemctl restart chronyd && systemctl enable chronyd
# 其他節點配置
yum -y install chrony
vim /etc/chrony.conf
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server 192.168.50.133 iburst
systemctl restart chronyd && systemctl enable chronyd
各節點驗證
chronyc sources
# 各節點配置OpenStack源
vim /etc/yum.repos.d/openstack.repo
[base]
name=CentOS-$releasever - Base
baseurl=https://mirrors.aliyun.com/centos/$releasever/os/$basearch/
gpgcheck=0
[updates]
name=CentOS-$releasever - Updates
baseurl=https://mirrors.aliyun.com/centos/$releasever/updates/$basearch/
gpgcheck=0
[extras]
name=CentOS-$releasever - Extras
baseurl=https://mirrors.aliyun.com/centos/$releasever/extras/$basearch/
gpgcheck=0
# 將repo文件發送到其他主機上
scp /etc/yum.repos.d/openstack.repo root@192.168.50.xxx:/etc/yum.repos.d/
yum clean all && yum makecache
# 各節點開始安裝
yum -y install centos-release-openstack-train
yum -y upgrade
yum -y install python-openstackclient
yum -y install openstack-selinux
只在controller節點上執行
安裝SQL數據庫
yum -y install mariadb mariadb-server python2-PyMySQL
cp /etc/my.cnf.d/openstack.cnf{,.bak}
vim /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 192.168.50.133 # hosts文件中定義的那個解析controller就寫哪個
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
systemctl start mariadb && systemctl enable mariadb
mysql_secure_installation
密碼定義為: Aa1122**
安裝消息隊列
yum -y install rabbitmq-server
systemctl start rabbitmq-server && systemctl enable rabbitmq-server
# 添加openstack用戶
rabbitmqctl add_user openstack RABBIT_PASS
# 給openstack用戶授權並配置
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
安裝memcached
yum -y install memcached python-memcached
cp /etc/sysconfig/memcached{,.bak}
sed -i 's/OPTIONS="-l 127.0.0.1,::1"/OPTIONS="-l 127.0.0.1,::1,controller"/' /etc/sysconfig/memcached
systemctl start memcached.service && systemctl enable memcached.service
安裝etcd
yum -y install etcd
vim /etc/etcd/etcd.conf
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.1.133:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.1.133:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.1.133:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.133:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.1.133:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
systemctl start etcd && systemctl enable etcd
controller節點安裝Identity 服務(代號keystone)
mysql -uroot -p
create database keystone;
grant all privileges on keystone.* to 'keystone'@'localhost' identified by 'KEYSTONE_DBPASS';
grant all privileges on keystone.* to 'keystone'@'%' identified by 'KEYSTONE_DBPASS';
# 安裝和配置前端組件
yum -y install openstack-keystone httpd mod_wsgi
vim /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[token]
provider = fernet
# 填充identity服務數據庫
su -s /bin/sh -c "keystone-manage db_sync" keystone
# 初始化 Fernet 密鑰庫
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# 引導身份服務
keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
--bootstrap-admin-url http://controller:5000/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
# 配置apache服務
vim /etc/httpd/conf/httpd.conf
ServerName controller # 沒有則新加
....
....
<Directory />
AllowOverride none
Require all denied # 將denied改為granted
</Directory>
注:如果不改成granted則一旦新創建的實例有錯誤則無法刪除(頁面上和命令都不行),該問題解決方法來自:https://blog.csdn.net/qq_19007335/article/details/107568713
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl restart httpd && systemctl enable httpd
# 通過設置適當的環境變量來配置管理帳戶
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
# 創建域、項目、用戶和角色
注: 默認自帶default域,如需創建域則執行下行命令
openstack domain create --description "An Example Domain" example
# 查看域
openstack domain list
# 創建service項目
openstack project create --domain default --description "Service Project" service
# 查看service項目中所有服務
openstack service list
# 驗證
## 卸載OS_AUTH_URL OS_PASSWORD變量
unset OS_AUTH_URL OS_PASSWORD
## 作為管理員用戶,請求身份驗證令牌
openstack --os-auth-url http://controller:5000/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name admin --os-username admin token issue
注: 密碼是上面定義的ADMIN_PASS
# 創建 OpenStack 客戶端環境腳本
echo "export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2" > admin-openrc
# 加載下這個腳本
source admin-openrc
# 請求身份驗證令牌
openstack token issue
controller節點上安裝Image服務(代號glance)
mysql -u root -p
create database glance;
grant all privileges on glance.* to 'glance'@'localhost' identified by 'GLANCE_DBPASS';
grant all privileges on glance.* to 'glance'@'%' identified by 'GLANCE_DBPASS';
# 加載下這個腳本
source admin-openrc
# 創建服務憑證
## 創建glance用戶
openstack user create --domain default --password GLANCE_PASS glance
# 在glance用戶和服務項目中添加管理員角色
openstack role add --project service --user glance admin
# 創建glance服務實體
openstack service create --name glance --description "OpenStack Image" image
# 創建image服務API端點
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
# 安裝並配置組件
yum -y install openstack-glance
vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = GLANCE_PASS
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
# 填充image服務數據庫
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl start openstack-glance-api && systemctl enable openstack-glance-api
# 驗證
source admin-openrc
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
# 使用 QCOW2 磁盤格式、裸容器格式和公開可見性將圖像上傳到 Image 服務,以便所有項目都可以訪問它
glance image-create --name "cirros" --file cirros-0.4.0-x86_64-disk.img \
--disk-format qcow2 --container-format bare --visibility public
# 公網下載Ubuntu21.10鏡像
wget http://ftp.sjtu.edu.cn/ubuntu-cd/21.10/ubuntu-21.10-desktop-amd64.iso
# 上傳Ubuntu21.10鏡像至openstack上
glance image-create --name "ubuntu-21.10-desktop-amd64" \
--file ./ubuntu-21.10-desktop-amd64.iso \
--disk-format iso --container-format bare --visibility public
# 查看鏡像
openstack image list
controller節點安裝Placement 服務(代號Placement )
# 加載下腳本
source admin-openrc
# 登陸數據庫
mysql -u root -p
create database placement;
grant all privileges on placement.* to 'placement'@'localhost' identified by 'PLACEMENT_DBPASS';
grant all privileges on placement.* to 'placement'@'%' identified by 'PLACEMENT_DBPASS';
openstack user create --domain default --password PLACEMENT_PASS placement
# Placement 用戶添加到具有 admin 角色的服務項目
openstack role add --project service --user placement admin
# 在服務目錄中創建 Placement API 條目
openstack service create --name placement --description "Placement API" placement
# 創建placement API服務端點
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
# 安裝和配置前端組件
yum -y install openstack-placement-api
vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = PLACEMENT_PASS
# 填充placement數據庫
su -s /bin/sh -c "placement-manage db sync" placement
注: 忽略輸出的消息
# 驗證
source admin-openrc
# 執行狀態檢查以確保一切正常
placement-status upgrade check
compute服務(代號nova)
controller節點安裝compute服務
mysql -u root -p
create database nova_api;
create database nova;
create database nova_cell0;
grant all privileges on nova_api.* TO 'nova'@'localhost' identified by 'NOVA_DBPASS';
grant all privileges on nova_api.* TO 'nova'@'%' identified by 'NOVA_DBPASS';
grant all privileges on nova.* TO 'nova'@'localhost' identified by 'NOVA_DBPASS';
grant all privileges on nova.* TO 'nova'@'%' identified by 'NOVA_DBPASS';
grant all privileges on nova_cell0.* TO 'nova'@'localhost' identified by 'NOVA_DBPASS';
grant all privileges on nova_cell0.* TO 'nova'@'%' identified by 'NOVA_DBPASS';
exit
source admin-openrc
# 創建計算服務憑證
## 創建nova用戶
openstack user create --domain default --password NOVA_PASS nova
# 將 admin 角色添加到 nova 用戶
openstack role add --project service --user nova admin
# 創建nova服務實體
openstack service create --name nova --description "OpenStack Compute" compute
# 創建compute API服務端點
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
# 安裝和配置組件
yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller # 連接消息隊列,官方文檔要求該行,缺少則nova各服務無法通信
my_ip=192.168.1.133 # controller節點內網IP
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
auth_type = password
auth_url = http://controller:5000/v3
project_name = service
project_domain_name = Default
username = placement
user_domain_name = Default
password = PLACEMENT_PASS
region_name = RegionOne
# 多長時間運行 nova-manage cell_v2discover_hosts 注冊新發現的計算節點
[scheduler]
discover_hosts_in_cells_interval = 300
# 填充nova-api數據庫
su -s /bin/sh -c "nova-manage api_db sync" nova
# 注冊 cell0 數據庫
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
# 創建 cell1 單元格
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# 填充nova數據庫
su -s /bin/sh -c "nova-manage db sync" nova
注: 這里可能會輸出一些東西,填充完后建議查nova庫中是否有數據
# 驗證 nova cell0 和 cell1 是否正確注冊
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
# 啟動 Compute 服務並加入開機自啟
systemctl start \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
systemctl enable \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
compute節點安裝compute服務
yum -y install openstack-nova-compute
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:RABBIT_PASS@controller
my_ip = 192.168.1.134 # compute節點的內網的地址
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
auth_type = password
auth_url = http://controller:5000/v3
project_name = service
project_domain_name = Default
username = placement
user_domain_name = Default
password = PLACEMENT_PASS
region_name = RegionOne
# 確定當前計算節點是否支持虛擬機的硬件加速
egrep -c '(vmx|svm)' /proc/cpuinfo
注:
如果返回值為1或更大值則當前計算節點支持硬件加速,不需要額外配置。
如果返回值為零,則當前計算節點不支持硬件加速,必須將 libvirt 配置為使用 QEMU 而不是 KVM。
[root@compute ~]# vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu # 如在虛擬機中做則需要修改成qemu,如在實體中做則改成kvm!!!!!
# 啟動服務
systemctl start libvirtd openstack-nova-compute
systemctl enable libvirtd openstack-nova-compute
systemctl status libvirtd openstack-nova-compute
# 將計算節點添加到單元數據庫(回到controller節點上執行)
source admin-openrc
openstack compute service list --service nova-compute
# 發現compute主機(controller節點上執行)
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
回顯如下:
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': bc775592-3458-43be-b664-6e0775a07a1e
Checking host mapping for compute host 'compute': 795d4810-e3a0-4f9e-af5b-94443a681f03
Creating host mapping for compute host 'compute': 795d4810-e3a0-4f9e-af5b-94443a681f03
Found 1 unmapped computes in cell: bc775592-3458-43be-b664-6e0775a07a1e
controller節點上安裝Networking服務(代號neutron)
mysql -u root -p
create database neutron;
grant all privileges on neutron.* to 'neutron'@'localhost' identified by 'NEUTRON_DBPASS';
grant all privileges on neutron.* to 'neutron'@'%' identified by 'NEUTRON_DBPASS';
source admin-openrc
# 創建服務憑證
## 創建 neutron 用戶
openstack user create --domain default --password NEUTRON_PASS neutron
# 為 neutron 用戶添加 admin 角色
openstack role add --project service --user neutron admin
# 創建neutron服務實體
openstack service create --name neutron --description "OpenStack Networking" network
# 創建networking服務API端點
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
## Networking Option 2: Self-service networks
# 安裝組件
yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
# 配置服務組件
vim /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
# 在 [DEFAULT] 部分,啟用模塊化第 2 層 (ML2) 插件、路由器服務和重疊 IP 地址(新添加)
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
transport_url = rabbit://openstack:RABBIT_PASS@controller
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
# 新添加[nova]段並在該段中的內容需要額外新添加
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = NOVA_PASS
# 配置模塊化第 2 層 (ML2) 插件
[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini
# 新增[ml2]段
[ml2]
# 在 [ml2] 部分,啟用flat、VLAN 和 VXLAN 網絡
type_drivers = flat,vlan,vxlan
# 啟用 VXLAN 自助服務網絡
tenant_network_types = vxlan
# 啟用 Linux 橋接和第 2 層填充機制
mechanism_drivers = linuxbridge,l2population
# 啟用端口安全擴展驅動
extension_drivers = port_security
# VLAN的id,一定要記得寫且必須寫在這里
[ml2_type_vxlan]
vni_ranges = 1:1000
# 配置linux bridge agent
[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens192 # 外網網卡名
[vxlan]
enable_vxlan = true
local_ip = 192.168.1.133 # controller節點內網網卡IP
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[root@controller ~]# echo 'net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1' >> /etc/sysctl.conf
[root@controller ~]# modprobe br_netfilter
[root@controller ~]# echo "modprobe br_netfilter" >> /etc/rc.d/rc.local
[root@controller ~]# sysctl -p
# 配置3層 agent
[root@controller ~]# vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge
# 配置DHCP agent
[root@controller ~]# vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
# 配置metadata agent
[root@controller ~]# vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = 407997e2017d7b2c8670 # 用openssl rand -hex 10命令生成
# 配置計算服務以使用網絡服務
[root@controller ~]# vim /etc/nova/nova.conf
[neutron]
service_metadata_proxy = true
metadata_proxy_shared_secret = 407997e2017d7b2c8670
auth_type = password
auth_url = http://controller:5000
project_name = service
project_domain_name = default
username = neutron
user_domain_name = default
password = NEUTRON_PASS
region_name = RegionOne
# 網絡服務初始化腳本需要一個符號鏈接 /etc/neutron/plugin.ini 指向 ML2 插件配置文件 /etc/neutron/plugins/ml2/ml2_conf.ini
[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
# 填充數據庫
[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
# 重啟Compute API服務
[root@controller ~]# systemctl restart openstack-nova-api.service
# 啟動網絡服務並加入開機自啟
[root@controller ~]# systemctl restart neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
[root@controller ~]# systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
[root@controller ~]# systemctl status neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
# 如果使用的是網絡的選項2,還需要開啟3層服務
[root@controller ~]# systemctl start neutron-l3-agent.service
[root@controller ~]# systemctl enable neutron-l3-agent.service
[root@controller ~]# systemctl status neutron-l3-agent.service -l
遇到問題1:
cannot load glue library: libibverbs.so.1: cannot open shared object file: No such file or directory
解決:
[root@controller ~]# yum -y install libibverbs
[root@controller ~]# systemctl restart neutron-l3-agent.service
[root@controller ~]# systemctl status neutron-l3-agent.service -l
compute節點安裝並配置
[root@compute ~]# yum -y install openstack-neutron-linuxbridge ebtables ipset
[root@compute ~]# vim /etc/neutron/neutron.conf
[DEFAULT]
auth_strategy = keystone # 該項是新加
transport_url = rabbit://openstack:RABBIT_PASS@controller
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
# 配置網絡選項2,自定義網絡
## 配置linux橋接agent
[root@compute ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens192 # compute節點外網網卡名
## 新添加[vxlan]段,啟用 VXLAN 覆蓋網絡,配置處理覆蓋網絡的物理網絡接口的 IP 地址,並啟用第 2 層填充:
[vxlan]
enable_vxlan = true
local_ip = 192.168.1.134 # compute節點內網IP(VXLAN覆蓋網絡應使用內網地址,與controller節點的local_ip在同一網段)
l2_population = true
# 新添加 [securitygroup] 段,啟用安全組並配置 Linux 網橋 iptables 防火牆驅動程序:
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# 確保 Linux 操作系統內核支持網橋過濾器
[root@compute ~]# echo 'net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1' >> /etc/sysctl.conf
[root@compute ~]# modprobe br_netfilter
[root@compute ~]# echo "modprobe br_netfilter" >> /etc/rc.d/rc.local
[root@compute ~]# sysctl -p
# 配置計算服務以使用網絡服務
[root@compute ~]# vim /etc/nova/nova.conf
[neutron]
auth_type = password
auth_url = http://controller:5000
project_name = service
project_domain_name = default
username = neutron
user_domain_name = default # 原文此處誤寫成重復的project_domain_name;缺少user_domain_name正是創建實例時報"Expecting to find domain in user"錯誤的原因
password = NEUTRON_PASS
region_name = RegionOne
# 重啟compute服務
[root@compute ~]#
systemctl restart openstack-nova-compute.service
systemctl start neutron-linuxbridge-agent.service
systemctl enable neutron-linuxbridge-agent.service
systemctl status neutron-linuxbridge-agent.service
## Dashboard 服務(代號horizon)
在controller節點上執行
yum -y install openstack-dashboard
vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "127.0.0.1"
改為
OPENSTACK_HOST = "controller"
# 開啟identity API的3版本
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
ALLOWED_HOSTS = ['horizon.example.com', 'localhost']
改為
ALLOWED_HOSTS = ['*']
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
改為
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
},
}
改為
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
},
}
# 啟用對域的支持(該項新添加)
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
# 配置API版本(該項新添加)
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 3,
}
# 將 Default 配置為您通過儀表板創建的用戶的默認域(該項新添加)
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
# 將用戶配置為您通過儀表板創建的用戶的默認角色(該項新添加)
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
# 配置時間時區
TIME_ZONE = "Asia/Shanghai"
# 檢查配置文件是否有如下項,如無則需手動添加
[root@controller ~]# vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}
# 重啟web服務和會話存儲
[root@controller ~]# systemctl restart httpd memcached
[root@controller ~]# systemctl status httpd memcached
# 瀏覽器驗證(暫打不開,下面有解決方法)
http://172.16.186.5/dashboard
[root@controller ~]# cat admin-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default # 登陸域
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin # 登陸賬號
export OS_PASSWORD=ADMIN_PASS # 登陸密碼
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
注: 截止到現在並登陸不上去,官網沒有給出解決辦法,解決辦法如下:
[root@controller ~]# cd /usr/share/openstack-dashboard/
# 重建dashboard配置
[root@controller openstack-dashboard]# cp -R /etc/httpd/conf.d/openstack-dashboard.conf{,.bak}
[root@controller openstack-dashboard]# python manage.py make_web_conf --apache > /etc/httpd/conf.d/openstack-dashboard.conf
# 登錄到dashboard將出現權限錯誤和顯示混亂,需要建立策略的軟鏈接
[root@controller openstack-dashboard]# ln -s /etc/openstack-dashboard /usr/share/openstack-dashboard/openstack_dashboard/conf
# 在local_settings最后新增根目錄指向
[root@controller openstack-dashboard]# vim /etc/openstack-dashboard/local_settings
WEBROOT = '/dashboard/' # 新添加該項
[root@controller openstack-dashboard]# vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi.py
改為
WSGIScriptAlias /dashboard /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
Alias /dashboard/static /usr/share/openstack-dashboard/static
# 重啟生效
[root@controller openstack-dashboard]# systemctl restart httpd memcached
[root@controller openstack-dashboard]# systemctl status httpd memcached
# 瀏覽器再次驗證
http://192.168.50.133/dashboard
.
.
.
.
.
-------------------------------------------------
以下創建主機操作不變,只是IP不同
上面環境的IP:
192.168.50.133(外網、橋接)/192.168.1.133(內網、自定義網絡)/4U/16G/100G/controller
192.168.50.134(外網、橋接)/192.168.1.134(內網、自定義網絡)/4U/16G/100G/compute
192.168.1.135(內網、NAT網絡)/4U/16G/100G/block
下圖環境和上圖對應關系:
controller節點:
192.168.50.133(外網、橋接)/192.168.1.133(內網、自定義網絡)
對應
192.168.1.111(外網、橋接)/172.16.186.131/24(內網、NAT網絡)
compute節點:
192.168.50.134(外網、橋接)/192.168.1.134(內網、NAT網絡)
對應
192.168.1.112/24(外網、橋接)/172.16.186.132/24(內網、NAT網絡)
block節點:
192.168.1.135/24
改為
172.16.186.133/24
創建虛擬機前的操作
以下創建虛擬機是單獨創建的域、用戶等所有
# 創建域
[root@controller ~]# openstack domain create 210Demo
# 創建項目
[root@controller ~]#
openstack project create --domain 210Demo Engineering
openstack project create --domain 210Demo Production
# 其他項目類參考命令
openstack project list
openstack project delete <[Project_ID] | [Name]>
# 創建用戶
[root@controller ~]#
openstack user create --domain 210Demo --project Engineering --password redhat --email zhangsan@lab.example.com zhangsan
openstack user create --domain 210Demo --project Production --password redhat --email lisi@lab.example.com lisi
# 設置zhangsan用戶的角色
[root@controller ~]# openstack role list
+----------------------------------+--------+
| ID | Name |
+----------------------------------+--------+
| b644d263f4264ae9b569f89f5ea07522 | reader |
| c528c456159e4d9a91f9c0ae18438905 | member |
| f844d50f331d41ac8f7d61479340ede2 | admin |
+----------------------------------+--------+
# 給zhangsan用戶Engineering項目的普通用戶角色
[root@controller ~]#
openstack role add --user zhangsan --user-domain 210Demo --project Engineering --project-domain 210Demo member
# 給zhangsan用戶Engineering項目的管理員角色
[root@controller ~]#
openstack role add --user zhangsan --user-domain 210Demo --project Engineering --project-domain 210Demo admin
# 給lisi用戶Production項目的普通用戶角色
[root@controller ~]#
openstack role add --user lisi --user-domain 210Demo --project Production --project-domain 210Demo member
openstack role add --user lisi --user-domain 210Demo --project Production --project-domain 210Demo admin
# 創建Devops組並將所有用戶添加到組中
[root@controller ~]#
openstack group create --domain 210Demo Devops
openstack group add user --group-domain 210Demo --user-domain 210Demo Devops zhangsan lisi
[root@controller ~]# openstack role assignment list --user-domain 210Demo --project Engineering --project-domain 210Demo --names
+--------+------------------+-------+---------------------+--------+--------+-----------+
| Role | User | Group | Project | Domain | System | Inherited |
+--------+------------------+-------+---------------------+--------+--------+-----------+
| member | zhangsan@210Demo | | Engineering@210Demo | | | False |
| admin | zhangsan@210Demo | | Engineering@210Demo | | | False |
+--------+------------------+-------+---------------------+--------+--------+-----------+
[root@controller ~]# openstack role assignment list --user-domain 210Demo --project Production --project-domain 210Demo --names
+--------+--------------+-------+--------------------+--------+--------+-----------+
| Role | User | Group | Project | Domain | System | Inherited |
+--------+--------------+-------+--------------------+--------+--------+-----------+
| member | lisi@210Demo | | Production@210Demo | | | False |
| admin | lisi@210Demo | | Production@210Demo | | | False |
+--------+--------------+-------+--------------------+--------+--------+-----------+
[root@controller ~]# cp admin-openrc zhangsanrc
[root@controller ~]# cat zhangsanrc
export OS_PROJECT_DOMAIN_NAME=210Demo # 要改
export OS_PROJECT_NAME=Engineering # 要改
export OS_USER_DOMAIN_NAME=210Demo # 要改
export OS_USERNAME=zhangsan # 要改
export OS_PASSWORD=redhat # 要改
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
# 切換用戶
[root@controller ~]# source zhangsanrc
創建虛擬機
1、創建名字叫web且是public類型的鏡像
**注:如使用以下方式創建鏡像則安裝虛擬機時的所有空間將使用的是該鏡像中的,這點需要理解
2、創建名字是m1.petitle且是public類型的實例類型(紅帽OpenStack中叫flavor)
3、創建安全組
4、為Engineering項目創建密鑰對
注:創建的該密鑰會下載到你現在用的電腦中,即本地
5、配置網絡
創建內部網絡
創建外部網絡
6、創建route
7、創建實例
.
.
.
.
游走在各發行版間老司機QQ群:905201396
不要嫌啰嗦的新手QQ群:756805267
Debian適應QQ群:912567610