Installing OpenStack Mitaka on CentOS 7.9

Role             Hostname     IP           Requirements
Controller node  controller   10.0.0.11    3 GB+ RAM, virtualization enabled
Compute node     compute1     10.0.0.31    1 GB+ RAM, virtualization enabled

1. Basic configuration

yum -y install lrzsz vim ntpdate wget net-tools

cat>/etc/profile.d/vim.sh<<EOF
alias vi=vim
EOF

# Switch to domestic (Aliyun) mirror repositories
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum makecache fast

1.1 Disable SELinux
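
A minimal sketch of the usual steps on CentOS 7 (run on both nodes; the config change takes full effect after a reboot):

setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config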


1.2 Disable the firewall
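
A minimal sketch of the usual commands on CentOS 7 (run on both nodes):

systemctl stop firewalld
systemctl disable firewalld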


1.3 Configure the hosts file

cat>>/etc/hosts<<EOF
10.0.0.11   controller
10.0.0.31   compute1
EOF

# Set the hostname (use compute1 on the compute node)
hostnamectl set-hostname controller

1.4 Configure a local Mitaka repository

Local repository download (Baidu Netdisk):

Link: https://pan.baidu.com/s/1ed7CuhMOzBOwOng-8dtwpA
Extraction code: 6fhk

# Install on the controller node
yum -y install vsftpd
systemctl start vsftpd
systemctl enable vsftpd

cd /var/ftp/pub
mkdir openstackmitaka
mkdir mnt

# The uploaded local ISO image
[root@controller pub]# ls
mnt  openstackmitaka  Openstack-Mitaka.iso

mount -o loop Openstack-Mitaka.iso openstackmitaka/

# The operating system ISO
mount /dev/sr0 /var/ftp/pub/mnt

# On the controller node
## Configure automatic mounting at boot
cat>>/etc/fstab<<EOF
/var/ftp/pub/Openstack-Mitaka.iso         /var/ftp/pub/openstackmitaka/       iso9660 ro,relatime 0 0
/dev/sr0                                  /var/ftp/pub/mnt                    iso9660 defaults        0 0
EOF
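
# mount -a re-reads /etc/fstab and mounts anything not already mounted, so it also serves as a quick check of the entries above
mount -a
df -h | grep /var/ftp/pub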


# Configure the local yum repositories
cat>/etc/yum.repos.d/openstack.repo<<EOF 
[Base_ISO]
name=base_iso
baseurl=ftp://10.0.0.11/pub/mnt/
gpgcheck=0

[Openstack_Mitaka]
name=openstack mitaka
baseurl=ftp://10.0.0.11/pub/openstackmitaka/Openstack-Mitaka/
gpgcheck=0
EOF

yum makecache fast
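
# Optional: confirm that both local repositories are now visible
yum repolist | grep -Ei 'base_iso|openstack_mitaka'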

1.5 Configure time synchronization

yum -y install chrony

# Server side (controller node)
vi /etc/chrony.conf
server ntp1.aliyun.com iburst
allow 10.0.0.0/24

systemctl restart chronyd
# Client side (compute node)
vi /etc/chrony.conf

server 10.0.0.11 iburst
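
# On either node, verify that the configured time source is reachable (restart chronyd on the compute node first after editing the file)
systemctl restart chronyd
chronyc sources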

1.6 Install base packages

# On all nodes
yum -y install python-openstackclient openstack-selinux openstack-utils.noarch

1.7 Run on the controller node

a. Install the database

yum -y install mariadb-server mariadb python2-PyMySQL

vi /etc/my.cnf
[mysqld]
bind-address = 10.0.0.11
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

systemctl enable mariadb --now

# Initialize the database
mysql_secure_installation

## Set the database password
Set root password? [Y/n] y
New password:  # <===== openstack
Re-enter new password: 
Password updated successfully!

Remove anonymous users? [Y/n] y
Disallow root login remotely? [Y/n] y
Remove test database and access to it? [Y/n] y
Reload privilege tables now? [Y/n] y

b. Install RabbitMQ

 yum install rabbitmq-server -y
 
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service

# Add the openstack user with password 'RABBIT_PASS' and grant it permissions
rabbitmqctl add_user openstack RABBIT_PASS
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
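
# Optional sanity check that the user and permissions were created
rabbitmqctl list_users
rabbitmqctl list_permissions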

# Enable the RabbitMQ management dashboard (web UI on port 15672)
rabbitmq-plugins enable rabbitmq_management

c. Install memcached

The Identity service authentication mechanism uses Memcached to cache tokens. The memcached service runs on the controller node. For production deployments, enabling a combination of firewalling, authentication, and encryption is recommended to secure it.

yum install memcached python-memcached -y

vi /etc/sysconfig/memcached
# listen on localhost and the controller's management address
OPTIONS="-l 127.0.0.1,::1,controller"

systemctl enable memcached.service
systemctl start memcached.service

2. Install the Identity service (keystone)

Run on the controller node.

2.1 Create the database

# Create the database
mysql -uroot -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
  IDENTIFIED BY 'KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
  IDENTIFIED BY 'KEYSTONE_DBPASS';

2.2 Install the Identity service

yum install openstack-keystone httpd mod_wsgi -y

# Generate a temporary authentication token
openssl rand -hex 10
b7ab536ae8ba4c3db833

cp /etc/keystone/keystone.conf{,_bak}
grep -Ev '^$|^#' /etc/keystone/keystone.conf_bak > /etc/keystone/keystone.conf

cat>/etc/keystone/keystone.conf<<EOF
[DEFAULT]
admin_token = b7ab536ae8ba4c3db833
[assignment]
[auth]
[cache]
[catalog]
[cors]
[cors.subdomain]
[credential]
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[eventlet_server_ssl]
[federation]
[fernet_tokens]
[identity]
[identity_mapping]
[kvs]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[os_inherit]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
[policy]
[resource]
[revoke]
[role]
[saml]
[shadow_users]
[signing]
[ssl]
[token]
provider = fernet

[tokenless_auth]
[trust]
EOF
# Alternatively, the same settings can be applied with the openstack-config command (from openstack-utils), which is idempotent and safe to re-run.
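# For example (a sketch; values match the file above):
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token b7ab536ae8ba4c3db833
openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
openstack-config --set /etc/keystone/keystone.conf token provider fernet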

2.3 Sync the database

# Populate the Identity service database
su -s /bin/sh -c "keystone-manage db_sync" keystone

# If the sync succeeded, this command lists the generated tables.
mysql keystone -e 'show tables;' -uroot -p

2.4 Initialize Fernet keys

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

# This creates the fernet-keys directory under /etc/keystone/, which holds the Fernet keys used for tokens. If the Identity service fails during registration, delete the directory and re-run the initialization to generate new keys.
/etc/keystone/fernet-keys

2.5 Configure the httpd service

vi /etc/httpd/conf/httpd.conf
ServerName controller

vi  /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357

<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>

<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>

# Start the service
systemctl enable httpd --now
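
# Quick sanity check: both ports should return a JSON version document
curl http://controller:5000/v3
curl http://controller:35357/v3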

2.6 Register the Identity service

export OS_TOKEN=b7ab536ae8ba4c3db833
export OS_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3

# Create the identity service entry
[root@controller ~]# openstack service create --name keystone --description "OpenStack Identity" identity
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Identity               |
| enabled     | True                             |
| id          | db37d54d45274871bea7ba510cc6584d |
| name        | keystone                         |
| type        | identity                         |
+-------------+----------------------------------+

# Create the identity API endpoints
[root@controller ~]# openstack endpoint create --region RegionOne identity public http://controller:5000/v3
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 9914f144a5fd44cb814997a8d785016d |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | db37d54d45274871bea7ba510cc6584d |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://controller:5000/v3        |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne identity internal http://controller:5000/v3
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | affd11b84ecd401189d7ac3ea5262786 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | db37d54d45274871bea7ba510cc6584d |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://controller:5000/v3        |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne identity admin http://controller:35357/v3
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 5e739dc035834ffebc1567aa69511195 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | db37d54d45274871bea7ba510cc6584d |
| service_name | keystone                         |
| service_type | identity                         |
| url          | http://controller:35357/v3       |
+--------------+----------------------------------+

2.7 Create a domain, projects, users, and roles

[root@controller ~]# openstack domain create --description "Default Domain" default
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Default Domain                   |
| enabled     | True                             |
| id          | 1510ca4a5403403b9bb65719bfdf67fe |
| name        | default                          |
+-------------+----------------------------------+
[root@controller ~]# openstack project create --domain default --description "Admin Project" admin
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Admin Project                    |
| domain_id   | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled     | True                             |
| id          | 3a75cae60c7c49b0b3e843e5949975bc |
| is_domain   | False                            |
| name        | admin                            |
| parent_id   | 1510ca4a5403403b9bb65719bfdf67fe |
+-------------+----------------------------------+
[root@controller ~]# openstack user create --domain default --password-prompt admin
User Password:    # <===== admin; this user password can be used to log in to the dashboard
Repeat User Password:
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled   | True                             |
| id        | c4d4838b317840c1b54ba1549a1010be |
| name      | admin                            |
+-----------+----------------------------------+
[root@controller ~]# openstack role create admin
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | None                             |
| id        | b1681570a20543c8857b4fa7b5f3ee63 |
| name      | admin                            |
+-----------+----------------------------------+
[root@controller ~]# openstack role add --project admin --user admin admin

# Create a service project; the service users of the later components will be placed in it
[root@controller ~]# openstack project create --domain default --description "Service Project" service
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Service Project                  |
| domain_id   | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled     | True                             |
| id          | f9dee317061e47c7a3d56b7b6f8b2250 |
| is_domain   | False                            |
| name        | service                          |
| parent_id   | 1510ca4a5403403b9bb65719bfdf67fe |
+-------------+----------------------------------+

# Use keystone password authentication; tokens will be issued automatically
vi admin-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

source admin-openrc

# Reconnect the terminal, or first unset the three temporary bootstrap variables, for example:
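# (a minimal example; OS_IDENTITY_API_VERSION is re-exported by admin-openrc anyway)
unset OS_TOKEN OS_URL OS_IDENTITY_API_VERSION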
[root@controller ~]# openstack token issue
+------------+----------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                          |
+------------+----------------------------------------------------------------------------------------------------------------+
| expires    | 2021-11-05T05:52:01.962585Z                                                                                    |
| id         | gAAAAABhhLhx874niGA3YGqO2tZ4jiECJxmLrdl4Uqqwdp0oKX-oCCjTG7Ts5ff8bApGeIdAEil59MuwU2NRlHo6LP0LVvMSGYgFyPkKK5y2_i |
|            | UZD0gjc3omArcgkJgjzgJsv0yVKOJOAa6DhV4T4qXvbKwKQI9J5fzICgrzXnyS2kqQwcmU-xY                                      |
| project_id | 3a75cae60c7c49b0b3e843e5949975bc                                                                               |
| user_id    | c4d4838b317840c1b54ba1549a1010be                                                                               |
+------------+----------------------------------------------------------------------------------------------------------------+

[root@controller ~]# openstack endpoint list

3. Glance image service

On the controller node.

3.1 Create the database

mysql -uroot -p

CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';

3.2 Create the user

[root@controller ~]# openstack user create --domain default --password-prompt glance
User Password:    # <==== glance
Repeat User Password:
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled   | True                             |
| id        | bd2f2e471ab74de1abdee9fe171fb892 |
| name      | glance                           |
+-----------+----------------------------------+

[root@controller ~]# openstack role add --project service --user glance admin

3.3 Register the service and endpoints

[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Image                  |
| enabled     | True                             |
| id          | f7636aa78fc946e5a9f0464b0c2a5209 |
| name        | glance                           |
| type        | image                            |
+-------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 8acfd45ca57b4dcabdfa71e1cc1bda7c |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | f7636aa78fc946e5a9f0464b0c2a5209 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 3c5c594af71e4e27a932a9244c332770 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | f7636aa78fc946e5a9f0464b0c2a5209 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 2879c39c7dc64db8a5f82d479b6d2f08 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | f7636aa78fc946e5a9f0464b0c2a5209 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

3.4 Install and configure

yum install openstack-glance -y
cp /etc/glance/glance-api.conf{,_bak}
grep -Ev '^$|^#' /etc/glance/glance-api.conf_bak > /etc/glance/glance-api.conf
[root@controller ~]# cat>/etc/glance/glance-api.conf<<EOF
[DEFAULT]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

[image_format]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance

[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
EOF
cp /etc/glance/glance-registry.conf{,_bak}
grep -Ev '^$|^#' /etc/glance/glance-registry.conf_bak > /etc/glance/glance-registry.conf

[root@controller ~]# cat>/etc/glance/glance-registry.conf<<EOF
[DEFAULT]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

[glance_store]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance

[matchmaker_redis]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_policy]
[paste_deploy]
flavor = keystone

[profiler]
EOF
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance
Option "verbose" from group "DEFAULT" is deprecated for removal.  Its value may be silently ignored in the future.
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:1056: OsloDBDeprecationWarning: EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade
  expire_on_commit=expire_on_commit, _conf=conf)
  
# Verify
mysql glance -uroot -p -e 'show tables;'

3.5 Start the services

systemctl enable openstack-glance-api.service \
  openstack-glance-registry.service
systemctl start openstack-glance-api.service \
  openstack-glance-registry.service
  

# View the role/project/user assignments
openstack role assignment list

3.6 Upload a test image
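
The command below expects cirros-0.3.4-x86_64-disk.img in the current directory. With internet access it can be fetched from the upstream CirrOS mirror (otherwise copy it over from the local package source):

wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img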

openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
+------------------+------------------------------------------------------+
| Field            | Value                                                |
+------------------+------------------------------------------------------+
| checksum         | ee1eca47dc88f4879d8a229cc70a07c6                     |
| container_format | bare                                                 |
| created_at       | 2021-11-05T06:03:36Z                                 |
| disk_format      | qcow2                                                |
| file             | /v2/images/e2f926f4-f466-4239-8444-178a30ae7744/file |
| id               | e2f926f4-f466-4239-8444-178a30ae7744                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | cirros                                               |
| owner            | 3a75cae60c7c49b0b3e843e5949975bc                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 13287936                                             |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2021-11-05T06:03:37Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+------------------+------------------------------------------------------+

[root@controller ~]# glance image-list
+--------------------------------------+--------+
| ID                                   | Name   |
+--------------------------------------+--------+
| e2f926f4-f466-4239-8444-178a30ae7744 | cirros |
+--------------------------------------+--------+

4. Nova compute service

Installation on the controller node

nova-api service

Accepts and responds to end-user compute API calls. It supports the OpenStack Compute API, the Amazon EC2 API, and a special Admin API for privileged users to perform administrative actions. It enforces some policies and initiates most orchestration activities, such as running an instance.

nova-api-metadata service

Accepts metadata requests from instances. It is generally used in multi-host mode with the nova-network service installed.

nova-compute service

A worker daemon that creates and terminates virtual machine instances through hypervisor APIs, for example:

  • XenAPI for XenServer/XCP
  • libvirt for KVM or QEMU
  • VMwareAPI for VMware

Processing is fairly complex. Basically, the daemon accepts actions from the queue and performs a series of system commands, such as launching a KVM instance, then updates the instance's state in the database.

nova-scheduler service

Takes a virtual machine instance request from the queue and determines on which compute host it runs.

nova-conductor module

Mediates interactions between the nova-compute service and the database. It eliminates direct access to the cloud database by the nova-compute service. The nova-conductor module scales horizontally; however, do not deploy it on nodes where the nova-compute service runs.

nova-cert module

A server daemon that serves the Nova Cert service for X509 certificates, used to generate certificates for euca-bundle-image. Only needed for EC2 API requests.

nova-network worker daemon

Similar to the nova-compute service, it accepts networking tasks from the queue and manipulates the network, performing tasks such as setting up bridging interfaces or changing iptables rules.

nova-consoleauth daemon

Authorizes tokens for users that console proxies provide (see nova-novncproxy and nova-xvpvncproxy). This service must be running for console proxies to work. In a cluster configuration you can run proxies of either type against a single nova-consoleauth service.

nova-novncproxy daemon

Provides a proxy for accessing running instances through a VNC connection. Supports the browser-based novnc client.

nova-spicehtml5proxy daemon

Provides a proxy for accessing running instances through a SPICE connection. Supports the browser-based HTML5 client.

nova-xvpvncproxy daemon

Provides a proxy for accessing running instances through a VNC connection. Supports the OpenStack-specific Java client.

nova-cert daemon

X509 certificates.

nova client

Enables users to submit commands as a tenant administrator or end user.

The queue

A central hub for passing messages between daemons, usually implemented with an AMQP message queue such as RabbitMQ or ZeroMQ.

SQL database

Stores most build-time and run-time state for the cloud infrastructure, including:

  • Available instance types
  • Instances in use
  • Available networks
  • Projects

Theoretically, OpenStack Compute can support any database backend that SQLAlchemy supports. SQLite3 is commonly used for testing and development work; MySQL and PostgreSQL are used in production.

4.1 Create the databases

mysql -u root -p

CREATE DATABASE nova_api;
CREATE DATABASE nova;

GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
  IDENTIFIED BY 'NOVA_DBPASS';

4.2 Create the user and register the API

[root@controller ~]# . admin-openrc
[root@controller ~]# openstack user create --domain default --password-prompt nova
User Password:    # <==== nova
Repeat User Password:
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled   | True                             |
| id        | 59b3ca2333324089ad3045d583ef4dbd |
| name      | nova                             |
+-----------+----------------------------------+
[root@controller ~]# openstack role add --project service --user nova admin
[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Compute                |
| enabled     | True                             |
| id          | 0fc8b3c4e2ba40bdafa6a21d9ab6eb5b |
| name        | nova                             |
| type        | compute                          |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | ed74f11bf10e4371a2361d1773523ab0          |
| interface    | public                                    |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | 0fc8b3c4e2ba40bdafa6a21d9ab6eb5b          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | 52eed6e272ee451ea1f0af4417a2380a          |
| interface    | internal                                  |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | 0fc8b3c4e2ba40bdafa6a21d9ab6eb5b          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | 97b7b8ac4ea84ff096682a91df829175          |
| interface    | admin                                     |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | 0fc8b3c4e2ba40bdafa6a21d9ab6eb5b          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+

4.3 Install and configure

yum install openstack-nova-api openstack-nova-conductor \
  openstack-nova-console openstack-nova-novncproxy \
  openstack-nova-scheduler -y
cp /etc/nova/nova.conf{,_bak}
grep -Ev '^$|^#' /etc/nova/nova.conf_bak > /etc/nova/nova.conf

[root@controller ~]# vi /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 10.0.0.11
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[barbican]
[cache]
[cells]
[cinder]
[conductor]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292

[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova

[libvirt]
[matchmaker_redis]
[metrics]
[neutron]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS

[oslo_middleware]
[oslo_policy]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip

[workarounds]
[xenserver]

# Sync the databases
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova
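
# As with keystone and glance, the synced tables can be checked (assuming the MariaDB root password set earlier)
mysql nova_api -uroot -p -e 'show tables;'
mysql nova -uroot -p -e 'show tables;'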

4.4 Start the services

systemctl enable openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service

Installation on the compute node

4.5 Install nova-compute

yum install openstack-nova-compute libvirt-client -y

4.6 Configure

cp /etc/nova/nova.conf{,_bak}
grep -Ev '^$|^#' /etc/nova/nova.conf_bak > /etc/nova/nova.conf

vi /etc/nova/nova.conf
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
# Management IP address of this node
my_ip = 10.0.0.31
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]
[barbican]
[cache]
[cells]
[cinder]
[conductor]
[cors]
[cors.subdomain]
[database]
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292

[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova

[libvirt]
[matchmaker_redis]
[metrics]
[neutron]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS

[oslo_middleware]
[oslo_policy]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
# If the noVNC proxy URL uses a hostname, the Windows client needs a matching hosts entry or the console will fail.
# If you do not want to edit the client's hosts file, use the controller's IP address here instead.
novncproxy_base_url = http://controller:6080/vnc_auto.html

[workarounds]
[xenserver]


# If the command below returns 0, apply the following settings. Without them the instances created later fail to boot; behavior may differ between CentOS 7 minor releases.
egrep -c '(vmx|svm)' /proc/cpuinfo

vi /etc/nova/nova.conf
[libvirt]
...
virt_type = qemu
cpu_mode = none

4.7 Start the services

systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service

4.8 Verify

. admin-openrc
openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| Id | Binary           | Host       | Zone     | Status  | State | Updated At                 |
+----+------------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-scheduler   | controller | internal | enabled | up    | 2021-11-05T06:58:35.000000 |
|  2 | nova-conductor   | controller | internal | enabled | up    | 2021-11-05T06:58:29.000000 |
|  3 | nova-consoleauth | controller | internal | enabled | up    | 2021-11-05T06:58:30.000000 |
|  7 | nova-compute     | compute1   | nova     | enabled | up    | 2021-11-05T06:58:27.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+

5. Install the Neutron networking service

Installation on the controller node

5.1 Configure the database

mysql -u root -p

CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
  IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
  IDENTIFIED BY 'NEUTRON_DBPASS';

5.2 Create the user and register endpoints

[root@controller ~]# openstack user create --domain default --password-prompt neutron
User Password:    # <==== neutron
Repeat User Password:
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | 1510ca4a5403403b9bb65719bfdf67fe |
| enabled   | True                             |
| id        | d9151535769a4bb386550a2b466f13f1 |
| name      | neutron                          |
+-----------+----------------------------------+

[root@controller ~]# openstack role add --project service --user neutron admin

[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Networking             |
| enabled     | True                             |
| id          | 4f17e060610e4f00b51bbdb78d5941fb |
| name        | neutron                          |
| type        | network                          |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne network public http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 4bcdd9a9e33f4e7bbd6025a2157dbf42 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 4f17e060610e4f00b51bbdb78d5941fb |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | cd78e2cb2dec4e15af526fe840abe6e3 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 4f17e060610e4f00b51bbdb78d5941fb |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 2e4a8c6e0c7b4e178007c9ee2f3f60a0 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 4f17e060610e4f00b51bbdb78d5941fb |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+

# Verify
[root@controller ~]# openstack endpoint list|grep network
| 63d6214c80f24a9ba7068b9e31d2d9b0 | RegionOne | neutron      | network      | True    | admin     | http://controller:9696                    |
| 74d918942dac446ebcb177558a4d361d | RegionOne | neutron      | network      | True    | public    | http://controller:9696                    |
| a2685cde3ffc4916be6aa970adfbd0cd | RegionOne | neutron      | network      | True    | internal  | http://controller:9696         

5.3 Install and configure

Controller configuration

yum install openstack-neutron openstack-neutron-ml2 \
  openstack-neutron-linuxbridge ebtables -y

cp /etc/neutron/neutron.conf{,_bak}
grep -Ev '^#|^$' /etc/neutron/neutron.conf_bak > /etc/neutron/neutron.conf

[root@controller ~]# cat /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins =
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True

[agent]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[matchmaker_redis]
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS

[oslo_policy]
[quotas]
[ssl]


cp /etc/neutron/plugins/ml2/ml2_conf.ini{,_bak}
grep -Ev '^#|^$' /etc/neutron/plugins/ml2/ml2_conf.ini_bak > /etc/neutron/plugins/ml2/ml2_conf.ini

[root@controller ~]# cat /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[ml2]
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security

[ml2_type_flat]
flat_networks = provider

[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
[securitygroup]
enable_ipset = True

# ====== Key parameters for the layer-2 network ======
type_drivers = flat,vlan           # flat (provider) networks and vlan
tenant_network_types =             # for layer-3 tenant networks; empty means disabled
mechanism_drivers = linuxbridge    # Linux bridge mechanism
flat_networks = provider           # name of the flat (layer-2) network: provider


cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,_bak}
grep -Ev '^$|^#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini_bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[root@controller ~]# cat /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth0

[securitygroup]
# Enable security groups and set the security group firewall driver
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

[vxlan]
enable_vxlan = False

# ====== physical_interface_mappings = provider:eth0 ======
# The provider network defined above is mapped (bridged) to the physical NIC eth0.


[root@controller ~]# cat /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True

# Generate a metadata_proxy_shared_secret; it only needs to match the value in nova.conf
openssl rand -hex 10  

[root@controller ~]# cat /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = d4e44a1b239c039806c9


vim /etc/nova/nova.conf
......
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron

service_metadata_proxy = True
metadata_proxy_shared_secret = d4e44a1b239c039806c9


ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
# ====== output truncated ======
INFO  [alembic.runtime.migration] Running upgrade 2a16083502f3 -> 2e5352a0ad4d, Add missing foreign keys
INFO  [alembic.runtime.migration] Running upgrade 2e5352a0ad4d -> 11926bcfe72d, add geneve ml2 type driver
INFO  [alembic.runtime.migration] Running upgrade 11926bcfe72d -> 4af11ca47297, Drop cisco monolithic tables
INFO  [alembic.runtime.migration] Running upgrade 4af11ca47297 -> 1b294093239c, Drop embrane plugin table
INFO  [alembic.runtime.migration] Running upgrade 1b294093239c, 32e5974ada25 -> 8a6d8bdae39, standardattributes migration
INFO  [alembic.runtime.migration] Running upgrade 8a6d8bdae39 -> 2b4c2465d44b, DVR sheduling refactoring
INFO  [alembic.runtime.migration] Running upgrade 2b4c2465d44b -> e3278ee65050, Drop NEC plugin tables
INFO  [alembic.runtime.migration] Running upgrade e3278ee65050, 15e43b934f81 -> c6c112992c9, rbac_qos_policy
INFO  [alembic.runtime.migration] Running upgrade c6c112992c9 -> 5ffceebfada, network_rbac_external
INFO  [alembic.runtime.migration] Running upgrade 5ffceebfada, 0e66c5227a8a -> 4ffceebfcdc, standard_desc
  OK

systemctl restart openstack-nova-api.service

5.4 Start and verify

systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
  
systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service

# It may take a short while for all agents to show up
[root@controller ~]# neutron agent-list
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host       | availability_zone | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| 354633ca-ebec-4b90-84dd-339e4b55144e | Linux bridge agent | controller |                   | :-)   | True           | neutron-linuxbridge-agent |
| 50c0fc56-79eb-42fe-97ae-887333618568 | DHCP agent         | controller | nova              | :-)   | True           | neutron-dhcp-agent        |
| e004250b-c51a-44b4-89e9-091f11a0a4da | Metadata agent     | controller |                   | :-)   | True           | neutron-metadata-agent    |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+

Installation on the compute node

5.5 Install and configure

yum install openstack-neutron-linuxbridge ebtables ipset -y

cp /etc/neutron/neutron.conf{,_bak}
grep -Ev '^#|^$' /etc/neutron/neutron.conf_bak > /etc/neutron/neutron.conf

[root@compute1 ~]# cat>/etc/neutron/neutron.conf<<EOF
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone

[agent]
[cors]
[cors.subdomain]
[database]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS

[oslo_policy]
[quotas]
[ssl]
EOF

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,_bak}
grep -Ev '^$|^#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini_bak  >/etc/neutron/plugins/ml2/linuxbridge_agent.ini

vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth0
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

[vxlan]
enable_vxlan = False

If the OpenStack hosts are themselves virtual machines created on ESXi, their NICs must be allowed to enter promiscuous mode (a security setting on the ESXi virtual switch / port group).

Without it, the ESXi-level VMs cannot deliver packets to the instances created by OpenStack.

Then configure Compute on the compute node to use the Networking service:

vi /etc/nova/nova.conf
...
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
...

5.6 Start

systemctl restart openstack-nova-compute.service

systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service

[root@controller ~]# neutron agent-list
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| id                                   | agent_type         | host       | availability_zone | alive | admin_state_up | binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| 354633ca-ebec-4b90-84dd-339e4b55144e | Linux bridge agent | controller |                   | :-)   | True           | neutron-linuxbridge-agent |
| 50c0fc56-79eb-42fe-97ae-887333618568 | DHCP agent         | controller | nova              | :-)   | True           | neutron-dhcp-agent        |
| 7af96de4-c3da-4773-bc23-b77cd896231f | Linux bridge agent | compute1   |                   | :-)   | True           | neutron-linuxbridge-agent |
| e004250b-c51a-44b4-89e9-091f11a0a4da | Metadata agent     | controller |                   | :-)   | True           | neutron-metadata-agent    |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+

6. Install the dashboard (Horizon)

On the controller node.

6.1 Install

 yum install openstack-dashboard -y

6.2 Configure

This essentially means editing a Django settings file.

# Find and change the following settings
vi /etc/openstack-dashboard/local_settings

# Configure the dashboard to use OpenStack services on the controller node:
OPENSTACK_HOST = "controller"

# Allow all hosts to access the dashboard
ALLOWED_HOSTS = ['*', ]

# Configure the memcached session storage service
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller:11211',
    }
}

# Enable the Identity API version 3
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

# Enable support for domains
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

# Configure the API versions
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}

# Set default as the default domain for users created via the dashboard
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"

# Set user as the default role for users created via the dashboard
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

# If you chose networking option 1 (provider networks), disable support for layer-3 networking services
OPENSTACK_NEUTRON_NETWORK = {
    ...
    'enable_router': False,
    'enable_quotas': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'enable_fip_topology_check': False,
}

# Configure the time zone
TIME_ZONE = "UTC"

vi /etc/httpd/conf.d/openstack-dashboard.conf
# Add after the fourth line
WSGIApplicationGroup %{GLOBAL}

6.3 Start

systemctl restart httpd.service memcached.service

# Access URL
http://10.0.0.11/dashboard
Username: admin
Password: admin

7. Launch an instance from the command line

7.1 Create the network

[root@controller ~]# . admin-openrc
[root@controller ~]# neutron net-create --shared --provider:physical_network provider --provider:network_type flat provider
Created a new network:
+---------------------------+--------------------------------------+
| Field                     | Value                                |
+---------------------------+--------------------------------------+
| admin_state_up            | True                                 |
| availability_zone_hints   |                                      |
| availability_zones        |                                      |
| created_at                | 2021-11-06T07:18:02                  |
| description               |                                      |
| id                        | e54cd98e-df99-411d-9a5b-5b5537ea6464 |
| ipv4_address_scope        |                                      |
| ipv6_address_scope        |                                      |
| mtu                       | 1500                                 |
| name                      | provider                             |
| port_security_enabled     | True                                 |
| provider:network_type     | flat                                 |
| provider:physical_network | provider                             |
| provider:segmentation_id  |                                      |
| router:external           | False                                |
| shared                    | True                                 |
| status                    | ACTIVE                               |
| subnets                   |                                      |
| tags                      |                                      |
| tenant_id                 | 3a75cae60c7c49b0b3e843e5949975bc     |
| updated_at                | 2021-11-06T07:18:02                  |
+---------------------------+--------------------------------------+

--shared                                 # shared network, usable by other users and projects
--provider:physical_network provider     # use the physical network "provider", matching the neutron ini files above
--provider:network_type flat             # network type flat; a flat network generally occupies a dedicated physical NIC
provider                                 # network name

7.2 Create a subnet

neutron subnet-create --name provider-net \
  --allocation-pool start=START_IP_ADDRESS,end=END_IP_ADDRESS \
  --dns-nameserver DNS_RESOLVER --gateway PROVIDER_NETWORK_GATEWAY \
  provider PROVIDER_NETWORK_CIDR
  
START_IP_ADDRESS            # first IP address of the allocation pool
END_IP_ADDRESS              # last IP address of the allocation pool
DNS_RESOLVER                # DNS server address
PROVIDER_NETWORK_GATEWAY    # gateway address
provider                    # the network this subnet is attached to
PROVIDER_NETWORK_CIDR       # e.g. 192.168.0.0/24

# Because the bridge is on eth0, the subnet uses the same IP range as the hosts, so instances can reach the outside world through eth0.
[root@controller ~]# neutron subnet-create --name provider-net \
   --allocation-pool start=10.0.0.150,end=10.0.0.160 \
   --dns-nameserver 223.5.5.5 --gateway 10.0.0.1 \
   provider 10.0.0.0/24
   
Created a new subnet:
+-------------------+----------------------------------------------+
| Field             | Value                                        |
+-------------------+----------------------------------------------+
| allocation_pools  | {"start": "10.0.0.150", "end": "10.0.0.160"} |
| cidr              | 10.0.0.0/24                                  |
| created_at        | 2021-11-06T07:30:06                          |
| description       |                                              |
| dns_nameservers   | 223.5.5.5                                    |
| enable_dhcp       | True                                         |
| gateway_ip        | 10.0.0.1                                     |
| host_routes       |                                              |
| id                | b3643d73-0925-485e-bcce-c22e6e4c2d3b         |
| ip_version        | 4                                            |
| ipv6_address_mode |                                              |
| ipv6_ra_mode      |                                              |
| name              | provider-net                                 |
| network_id        | e54cd98e-df99-411d-9a5b-5b5537ea6464         |
| subnetpool_id     |                                              |
| tenant_id         | 3a75cae60c7c49b0b3e843e5949975bc             |
| updated_at        | 2021-11-06T07:30:06                          |
+-------------------+----------------------------------------------+

7.3 Create a flavor

Default flavors already exist:

[root@controller ~]# openstack flavor list
+----+-----------+-------+------+-----------+-------+-----------+
| ID | Name      |   RAM | Disk | Ephemeral | VCPUs | Is Public |
+----+-----------+-------+------+-----------+-------+-----------+
| 1  | m1.tiny   |   512 |    1 |         0 |     1 | True      |
| 2  | m1.small  |  2048 |   20 |         0 |     1 | True      |
| 3  | m1.medium |  4096 |   40 |         0 |     2 | True      |
| 4  | m1.large  |  8192 |   80 |         0 |     4 | True      |
| 5  | m1.xlarge | 16384 |  160 |         0 |     8 | True      |
+----+-----------+-------+------+-----------+-------+-----------+

# To create a custom flavor
[root@controller ~]# openstack flavor create --id 0 --vcpus 1 --ram 128 --disk 5 my_create
+----------------------------+-----------+
| Field                      | Value     |
+----------------------------+-----------+
| OS-FLV-DISABLED:disabled   | False     |
| OS-FLV-EXT-DATA:ephemeral  | 0         |
| disk                       | 5         |
| id                         | 0         |
| name                       | my_create |
| os-flavor-access:is_public | True      |
| ram                        | 128       |
| rxtx_factor                | 1.0       |
| swap                       |           |
| vcpus                      | 1         |
+----------------------------+-----------+

7.4 Create a key pair

A key pair allows passwordless SSH access to instances; if created, it is injected when the instance is launched.

ssh-keygen -q -N "" -f ~/.ssh/id_rsa
[root@controller ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub my_ssh_key
+-------------+-------------------------------------------------+
| Field       | Value                                           |
+-------------+-------------------------------------------------+
| fingerprint | 44:59:f7:cb:05:70:8b:4b:e1:50:e8:36:4f:bf:51:f4 |
| name        | my_ssh_key                                      |
| user_id     | c4d4838b317840c1b54ba1549a1010be                |
+-------------+-------------------------------------------------+

7.5 Add security group rules

By default the instance should already be pingable; if not, add the following rules.

[root@controller ~]# openstack security group rule create --proto icmp default
+-----------------------+--------------------------------------+
| Field                 | Value                                |
+-----------------------+--------------------------------------+
| id                    | af241acf-296b-428d-b368-529ec09c9dfe |
| ip_protocol           | icmp                                 |
| ip_range              | 0.0.0.0/0                            |
| parent_group_id       | 15b8498c-eb91-47a2-8f7e-004c93d4803b |
| port_range            |                                      |
| remote_security_group |                                      |
+-----------------------+--------------------------------------+
[root@controller ~]# openstack security group rule create --proto tcp --dst-port 22 default
+-----------------------+--------------------------------------+
| Field                 | Value                                |
+-----------------------+--------------------------------------+
| id                    | 73ada95f-81ff-42b1-95c9-e732d2f6480b |
| ip_protocol           | tcp                                  |
| ip_range              | 0.0.0.0/0                            |
| parent_group_id       | 15b8498c-eb91-47a2-8f7e-004c93d4803b |
| port_range            | 22:22                                |
| remote_security_group |                                      |
+-----------------------+--------------------------------------+

7.6 Launch an instance

[root@controller ~]# openstack flavor list
+----+-----------+-------+------+-----------+-------+-----------+
| ID | Name      |   RAM | Disk | Ephemeral | VCPUs | Is Public |
+----+-----------+-------+------+-----------+-------+-----------+
| 0  | my_create |   128 |    5 |         0 |     1 | True      |
| 1  | m1.tiny   |   512 |    1 |         0 |     1 | True      |
| 2  | m1.small  |  2048 |   20 |         0 |     1 | True      |
| 3  | m1.medium |  4096 |   40 |         0 |     2 | True      |
| 4  | m1.large  |  8192 |   80 |         0 |     4 | True      |
| 5  | m1.xlarge | 16384 |  160 |         0 |     8 | True      |
+----+-----------+-------+------+-----------+-------+-----------+

[root@controller ~]# openstack image list
+--------------------------------------+--------+--------+
| ID                                   | Name   | Status |
+--------------------------------------+--------+--------+
| e2f926f4-f466-4239-8444-178a30ae7744 | cirros | active |
+--------------------------------------+--------+--------+

[root@controller ~]# neutron net-list
+--------------------------------------+----------+--------------------------------------------------+
| id                                   | name     | subnets                                          |
+--------------------------------------+----------+--------------------------------------------------+
| e54cd98e-df99-411d-9a5b-5b5537ea6464 | provider | b3643d73-0925-485e-bcce-c22e6e4c2d3b 10.0.0.0/24 |
+--------------------------------------+----------+--------------------------------------------------+

# Create the instance
openstack server create --flavor my_create --image cirros \
  --nic net-id=e54cd98e-df99-411d-9a5b-5b5537ea6464 --security-group default \
  --key-name my_ssh_key cirros-tt
  
+--------------------------------------+-----------------------------------------------+
| Field                                | Value                                         |
+--------------------------------------+-----------------------------------------------+
| OS-DCF:diskConfig                    | MANUAL                                        |
| OS-EXT-AZ:availability_zone          |                                               |
| OS-EXT-SRV-ATTR:host                 | None                                          |
| OS-EXT-SRV-ATTR:hypervisor_hostname  | None                                          |
| OS-EXT-SRV-ATTR:instance_name        | instance-00000001                             |
| OS-EXT-STS:power_state               | 0                                             |
| OS-EXT-STS:task_state                | scheduling                                    |
| OS-EXT-STS:vm_state                  | building                                      |
| OS-SRV-USG:launched_at               | None                                          |
| OS-SRV-USG:terminated_at             | None                                          |
| accessIPv4                           |                                               |
| accessIPv6                           |                                               |
| addresses                            |                                               |
| adminPass                            | HL964yvkPF7n                                  |
| config_drive                         |                                               |
| created                              | 2021-11-06T07:50:12Z                          |
| flavor                               | my_create (0)                                 |
| hostId                               |                                               |
| id                                   | 0b5e4f81-e811-44f7-b149-eec8465fa841          |
| image                                | cirros (e2f926f4-f466-4239-8444-178a30ae7744) |
| key_name                             | my_ssh_key                                    |
| name                                 | cirros-tt                                     |
| os-extended-volumes:volumes_attached | []                                            |
| progress                             | 0                                             |
| project_id                           | 3a75cae60c7c49b0b3e843e5949975bc              |
| properties                           |                                               |
| security_groups                      | [{u'name': u'default'}]                       |
| status                               | BUILD                                         |
| updated                              | 2021-11-06T07:50:13Z                          |
| user_id                              | c4d4838b317840c1b54ba1549a1010be              |
+--------------------------------------+-----------------------------------------------+

[root@controller ~]# openstack server list
+--------------------------------------+-----------+--------+---------------------+
| ID                                   | Name      | Status | Networks            |
+--------------------------------------+-----------+--------+---------------------+
| 64a7c56c-039f-47b4-be73-1561e3a1c3d5 | cirros-tt | ACTIVE | provider=10.0.0.153 |
+--------------------------------------+-----------+--------+---------------------+
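
# With the key pair injected and the ICMP/SSH rules above, a quick connectivity check from the controller might look like this
# (10.0.0.153 is the address assigned above; cirros is the default login user of the CirrOS image)
ping -c 3 10.0.0.153
ssh cirros@10.0.0.153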

# Console access URL for the instance
[root@controller ~]# openstack console url show cirros-tt
+-------+---------------------------------------------------------------------------------+
| Field | Value                                                                           |
+-------+---------------------------------------------------------------------------------+
| type  | novnc                                                                           |
| url   | http://controller:6080/vnc_auto.html?token=5306cd0e-fc1f-4481-a5e3-0ce45642f76a |
+-------+---------------------------------------------------------------------------------+

# The URL shows "controller", the controller node's hostname; a Windows client without a matching hosts entry cannot open it. Workarounds:
Option 1: add a hosts entry on the client
Option 2: change nova.conf on the compute node
vi /etc/nova/nova.conf
... ... ...
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://10.0.0.11:6080/vnc_auto.html

systemctl restart openstack-nova-compute.service

