System environment
[root@controller ~]# cat /etc/redhat-release
Fedora release 25 (Twenty Five)
[root@controller ~]# uname -a
Linux controller 4.8.6-300.fc25.x86_64 #1 SMP Tue Nov 1 12:36:38 UTC 2016 x86_64 x86_64 x86_64 GNU/Linux
Network configuration
[root@controller ml2]# vi /etc/sysconfig/network-scripts/ifcfg-eno1
HWADDR=2C:59:E5:47:A8:C8
TYPE=Ethernet
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eno1
UUID=fcd7ea2e-098c-3a09-bfa7-9a089b36ccf3
ONBOOT=yes
AUTOCONNECT_PRIORITY=-999
IPADDR=10.0.100.210
PREFIX=24
GATEWAY=10.0.100.1
DNS1=10.0.100.10
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPV6_PRIVACY=no

[root@controller ml2]# vi /etc/sysconfig/network-scripts/ifcfg-eno2
HWADDR=2C:59:E5:47:A8:C9
TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
PEERDNS=yes
PEERROUTES=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eno2
UUID=90075ab3-240d-3371-a2f6-76b1076e82e0
ONBOOT=yes
AUTOCONNECT_PRIORITY=-999
IPADDR=10.0.0.17
NETMASK=255.255.255.0
GATEWAY=10.0.0.254
DNS=10.0.100.10
1. NIC teaming
nmcli con add con-name team1 ifname teamif type team config '{"runner":{"name":"roundrobin"}}'
nmcli connection add con-name team-subif1 ifname enp2s0f1 type team-slave master team1
nmcli connection add con-name team-subif2 ifname enp3s0f0 type team-slave master team1
nmcli connection add con-name team-subif3 ifname enp3s0f1 type team-slave master team1
nmcli con mod team1 ipv4.addresses 10.0.0.13/24
nmcli con mod team1 ipv4.gateway 10.0.0.1
nmcli con mod team1 ipv4.method manual

[root@control3 network-scripts]# nmcli connection show
NAME         UUID                                  TYPE            DEVICE
enp2s0f0     23dae73f-1caf-3db3-988c-6bdfc184668c  802-3-ethernet  enp2s0f0
enp2s0f1     2462d65e-7d69-3e6f-aa12-2fd821067e8a  802-3-ethernet  enp2s0f1
enp3s0f1     30ea3af7-4f31-3564-b14c-bce6b96dab79  802-3-ethernet  enp3s0f1
team1        18659422-e906-4f5c-bece-39646d28414a  team            teamif
dhcp         06da1e4f-f3c0-49dd-a5de-dd0d94161015  802-3-ethernet  --
enp3s0f0     a95ceb14-7539-3c92-8eb3-d082bd964a1a  802-3-ethernet  --
team-subif1  3b72f586-01f3-4f3e-b2e8-e4820bb4250c  802-3-ethernet  --
team-subif2  dfd4f154-374c-46e2-8c37-0e2083dc71cf  802-3-ethernet  --
team-subif3  523762a7-6fd2-4541-93af-97a7497d58ba  802-3-ethernet  --
team1        2feb427b-ecc7-44ee-9c42-472f8457e7c9  team            --

[root@control3 network-scripts]# nmcli con del static
Connection 'static' (25569dd0-3853-42b1-9619-6866b0d4dc88) successfully deleted.
Connection 'static' (6a03b679-8d9d-43f4-8c1f-688a2496ef5c) successfully deleted.
Connection 'static' (c0a34979-cda7-424f-bf39-87aa8857e3de) successfully deleted.
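To activate the team and confirm that the runner and member ports actually came up, something like the following should work (a quick check, assuming the teamd tools that back the team connection type are installed):

nmcli con up team1
teamdctl team1 state      #shows the roundrobin runner and per-port link status
ip addr show teamif       #confirm 10.0.0.13/24 landed on the team interface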
2. Partitioning
1. Check the current disk layout
[root@comput4 ~]# fdisk -l
Disk /dev/sda: 447.1 GiB, 480103981056 bytes, 937703088 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x26ec1cfd

Device     Boot   Start       End   Sectors  Size Id Type
/dev/sda1  *       2048    976895    974848  476M 83 Linux
/dev/sda2        976896   8976383   7999488  3.8G 82 Linux swap / Solaris
/dev/sda3       8976384 218691583 209715200  100G 83 Linux
2. Create the new partition
[root@comput4 ~]# fdisk /dev/sda

Welcome to fdisk (util-linux 2.29.1).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Command (m for help): n
Partition type
   p   primary (3 primary, 0 extended, 1 free)
   e   extended (container for logical partitions)
Select (default e): p

Selected partition 4
First sector (218691584-937703087, default 218691584): (press Enter)
Last sector, +sectors or +size{K,M,G,T,P} (218691584-937703087, default 937703087): (press Enter)

Created a new partition 4 of type 'Linux' and of size 342.9 GiB.

Command (m for help): w
The partition table has been altered.
Calling ioctl() to re-read partition table.
Re-reading the partition table failed.: Device or resource busy

The kernel still uses the old table. The new table will be used at the next reboot or after you run partprobe(8) or kpartx(8).
3. Run partprobe, which makes the kernel re-read the partition table and so avoids a reboot.
[root@comput4 ~]# partprobe
4. Verify the new partition
[root@comput4 ~]# fdisk -l
Disk /dev/sda: 447.1 GiB, 480103981056 bytes, 937703088 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x26ec1cfd
Device Boot Start End Sectors Size Id Type
/dev/sda1 * 2048 976895 974848 476M 83 Linux
/dev/sda2 976896 8976383 7999488 3.8G 82 Linux swap / Solaris
/dev/sda3 8976384 218691583 209715200 100G 83 Linux
/dev/sda4 218691584 937703087 719011504 342.9G 83 Linux
5. Format it as XFS and mount it
[root@comput4 ~]# mkfs.xfs -d su=64k,sw=4 /dev/sda4 -f
meta-data=/dev/sda4              isize=512    agcount=16, agsize=5617264 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=0, rmapbt=0, reflink=0
data     =                       bsize=4096   blocks=89876224, imaxpct=25
         =                       sunit=16     swidth=64 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=43888, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@comput4 ~]# echo "/dev/sda4 /export/sda4 xfs defaults 0 0" >> /etc/fstab
[root@comput4 ~]# mkdir -p /export/sda4 && mount -a && mkdir -p /export/sda4/brick
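Before trusting the new fstab entry across reboots, it can be sanity-checked; a small sketch (findmnt --verify needs util-linux 2.29 or newer, which the fdisk banner above shows this host has):

findmnt --verify        #parse /etc/fstab and flag mistakes before the next boot hits them
xfs_info /export/sda4   #sunit/swidth here should reflect the su=64k,sw=4 given to mkfs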
6. Verify the mount
[root@comput4 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 32G 0 32G 0% /dev
tmpfs 32G 0 32G 0% /dev/shm
tmpfs 32G 1.8M 32G 1% /run
tmpfs 32G 0 32G 0% /sys/fs/cgroup
/dev/sda3 100G 1.3G 99G 2% /
tmpfs 32G 0 32G 0% /tmp
/dev/sda1 453M 113M 314M 27% /boot
tmpfs 6.3G 0 6.3G 0% /run/user/0
/dev/sda4 343G 383M 343G 1% /export/sda4
7. Converting the partition to the LVM type
#Check the current partitions
[root@control4 cinder]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 32G 0 32G 0% /dev
tmpfs 32G 0 32G 0% /dev/shm
tmpfs 32G 1.6M 32G 1% /run
tmpfs 32G 0 32G 0% /sys/fs/cgroup
/dev/sda3 100G 3.4G 97G 4% /
tmpfs 32G 24M 32G 1% /tmp
/dev/sda1 477M 127M 321M 29% /boot
/dev/sda4 343G 33M 343G 1% /export/sda4    #this is the partition we will reformat as LVM
tmpfs 6.3G 0 6.3G 0% /run/user/0
#Unmount sda4
[root@control4 cinder]# umount /export/sda4
[root@control4 cinder]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs         32G     0   32G   0% /dev
tmpfs            32G     0   32G   0% /dev/shm
tmpfs            32G  1.6M   32G   1% /run
tmpfs            32G     0   32G   0% /sys/fs/cgroup
/dev/sda3       100G  3.4G   97G   4% /
tmpfs            32G   24M   32G   1% /tmp
/dev/sda1       477M  127M  321M  29% /boot
tmpfs           6.3G     0  6.3G   0% /run/user/0
#Remove the automatic mount at boot
[root@control4 cinder]# vi /etc/fstab
/dev/sda4 /export/sda4 xfs defaults 0 0    #delete this line to disable the automatic mount
#Confirm it is no longer mounted
[root@control4 cinder]# df -h
Filesystem      Size  Used Avail Use% Mounted on
devtmpfs         32G     0   32G   0% /dev
tmpfs            32G     0   32G   0% /dev/shm
tmpfs            32G  1.6M   32G   1% /run
tmpfs            32G     0   32G   0% /sys/fs/cgroup
/dev/sda3       100G  3.4G   97G   4% /
tmpfs            32G   24M   32G   1% /tmp
/dev/sda1       477M  127M  321M  29% /boot
tmpfs           6.3G     0  6.3G   0% /run/user/0
#Re-partition
[root@control4 cinder]# fdisk /dev/sda4

Welcome to fdisk (util-linux 2.28.2).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device /dev/sda4 already contains a xfs signature.
The signature will be removed by a write command.

Device does not contain a recognized partition table.
Created a new DOS disklabel with disk identifier 0x1b5bc4ac.

Command (m for help): n
Partition type
   p   primary (0 primary, 0 extended, 4 free)
   e   extended (container for logical partitions)
Select (default p): p
Partition number (1-4, default 1):
First sector (2048-718573231, default 2048):
Last sector, +sectors or +size{K,M,G,T,P} (2048-718573231, default 718573231):

Created a new partition 1 of type 'Linux' and of size 342.7 GiB.

Command (m for help): t
Selected partition 1
Partition type (type L to list all types): 8e
Changed type of partition 'Linux' to 'Linux LVM'.

Command (m for help): p
Disk /dev/sda4: 342.7 GiB, 367909494784 bytes, 718573232 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x1b5bc4ac

Device      Boot Start       End   Sectors   Size Id Type
/dev/sda4p1       2048 718573231 718571184 342.7G 8e Linux LVM

Command (m for help): w
The partition table has been altered.
Calling ioctl() to re-read partition table.
Re-reading the partition table failed.: Invalid argument

The kernel still uses the old table. The new table will be used at the next reboot or after you run partprobe(8) or kpartx(8).
#Refresh so the change takes effect without a reboot
[root@control4 cinder]# partprobe
#Create the LVM physical volume on /dev/sda4:
[root@control4 cinder]# pvcreate /dev/sda4
WARNING: dos signature detected on /dev/sda4 at offset 510. Wipe it? [y/n]: y
  Wiping dos signature on /dev/sda4.
  Physical volume "/dev/sda4" successfully created.
#Create the LVM volume group cinder-volumes:
[root@control4 cinder]# vgcreate cinder-volumes /dev/sda4
  Volume group "cinder-volumes" successfully created
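A quick way to confirm the PV and VG are in place before handing the group to Cinder (a verification sketch; the throwaway-LV round-trip is optional):

pvs /dev/sda4                                  #the PV should list cinder-volumes as its VG
vgs cinder-volumes                             #VSize should be roughly 342.7g
lvcreate -L 1G -n smoke-test cinder-volumes    #optional: create a test LV...
lvremove -f cinder-volumes/smoke-test          #...and remove it again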
3. OpenStack overview
Function | Project | Description |
Compute | Nova | Creates and manages VM instances: boot, shutdown, suspend, migrate, and resizing of CPU, memory, and other flavor properties. |
Object storage | Swift | Object storage for massively scalable systems, with built-in redundancy and fault tolerance. |
Image service | Glance | Catalog and lookup of VM images; create, upload, delete, and edit image metadata. |
Identity service | Keystone | Provides authentication, service policy, and service tokens for the other services. |
Networking | Neutron | Network virtualization for the other services; supports user-defined network rules and mainstream vendor technologies. |
Block storage | Cinder | Reliable block storage for instances: create, delete, attach, detach, and manage volumes. |
Dashboard | Horizon | An easy-to-use web management UI that lowers the barrier to operating the other services. |
Telemetry | Ceilometer | Collects all events in the deployment, for monitoring, billing, and as a data source for other services. |
Orchestration | Heat | Template-driven, automated deployment of resource environments. |
Database service | Trove | Scalable relational and non-relational database services for users. |
4. Installing OpenStack
1. Configure /etc/hosts (method 1: use an existing NTP time server)
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.17   controller computer ntpserver
2. Configure time synchronization
[root@control4 ~]# service crond restart
Redirecting to /bin/systemctl restart crond.service
[root@control4 ~]# service crond status
Redirecting to /bin/systemctl status crond.service
● crond.service - Command Scheduler
   Loaded: loaded (/usr/lib/systemd/system/crond.service; enabled; vendor preset: enabled)
   Active: active (running) since Wed 2017-08-02 16:47:02 CST; 1min 7s ago
 Main PID: 4445 (crond)
    Tasks: 1 (limit: 9830)
   CGroup: /system.slice/crond.service
           └─4445 /usr/sbin/crond -n

Aug 02 16:47:02 control4 systemd[1]: Started Command Scheduler.
Aug 02 16:47:02 control4 crond[4445]: (CRON) INFO (Syslog will be used instead of sendmail.)
Aug 02 16:47:02 control4 crond[4445]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 19% if used.)
Aug 02 16:47:02 control4 crond[4445]: (CRON) INFO (running with inotify support)
Aug 02 16:47:02 control4 crond[4445]: (CRON) INFO (@reboot jobs will be run at computer's startup.)

[root@control4 sbin]# find / -name ntpdate
/usr/share/bash-completion/completions/ntpdate
[root@control4 sbin]# crontab -l
*/30 * * * * /usr/share/bash-completion/completions/ntpdate 10.0.100.208

Note that the only ntpdate hit here is the bash-completion script, not the ntpdate binary, so this cron entry cannot actually sync the clock; that is why method 2 below uses chrony instead.
Method 2: run our own time server (chrony)
1. Configure /etc/hosts
[root@control4 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.17   controller computer ntpserver
2. Install chrony
[root@control4 ~]# yum install chrony
[root@control4 ~]# cp /etc/chrony.conf /etc/chrony.conf.bak
[root@control4 ~]# vi /etc/chrony.conf
server controller iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 10.0.0.0/24
logdir /var/log/chrony

#Enable at boot
[root@control4 ~]# systemctl enable chronyd.service
Created symlink /etc/systemd/system/multi-user.target.wants/chronyd.service → /usr/lib/systemd/system/chronyd.service.
#Start the service
[root@control4 ~]# systemctl start chronyd.service
#Verify operation
[root@control4 ~]# chronyc sources
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^? controller                    0   6     0     -     +0ns[   +0ns] +/-    0ns
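The ^? state above means chrony has not yet reached the source; after it has had a minute to poll, the following should show whether it is syncing (run on the same host; note that with server controller pointing at itself, a standalone host may stay unsynchronised, which is harmless for a single-node lab):

chronyc tracking       #reference ID, stratum, and current clock offset
chronyc sources -v     #-v adds a legend explaining the MS column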
5. Installing the base environment (database, message queue, memcached)
The compute service, Nova, handles creating VMs, powering them on and off, suspension, migration, and resizing of CPU, memory, and other flavor properties.
Official documentation: https://docs.openstack.org/newton/install-guide-rdo/environment-packages.html
1. Install the OpenStack repository
yum install https://repos.fedorapeople.org/repos/openstack/openstack-newton/rdo-release-newton-5.noarch.rpm
yum upgrade
yum install python-openstackclient
2. Install the MariaDB database
Install and configure components

Install the packages:

# yum install mariadb mariadb-server python2-PyMySQL

Create and edit the /etc/my.cnf.d/openstack.cnf file and complete the following actions:
Create a [mysqld] section, and set the bind-address key to the management IP address of the controller node to enable access by other nodes via the management network. Set additional keys to enable useful options and the UTF-8 character set:

[mysqld]
bind-address = 10.0.0.17
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

Finalize installation

Start the database service and configure it to start when the system boots:

# systemctl enable mariadb.service
# systemctl start mariadb.service

Secure the database service by running the mysql_secure_installation script. In particular, choose a suitable password for the database root account.
#Check the service
[root@control4 my.cnf.d]# netstat -lntup|grep mysqld
tcp   0   0 10.0.0.17:3306   0.0.0.0:*   LISTEN   40376/mysqld
#Secure the MySQL installation
[root@control4 my.cnf.d]# mysql_secure_installation

NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB
      SERVERS IN PRODUCTION USE!  PLEASE READ EACH STEP CAREFULLY!

In order to log into MariaDB to secure it, we'll need the current
password for the root user. If you've just installed MariaDB, and
you haven't set the root password yet, the password will be blank,
so you should just press enter here.

Enter current password for root (enter for none):
OK, successfully used password, moving on...

Setting the root password ensures that nobody can log into the MariaDB
root user without the proper authorisation.

Set root password? [Y/n] y
New password:
Re-enter new password:
Password updated successfully!
Reloading privilege tables..
 ... Success!

By default, a MariaDB installation has an anonymous user, allowing anyone
to log into MariaDB without having to have a user account created for
them. This is intended only for testing, and to make the installation
go a bit smoother. You should remove them before moving into a
production environment.

Remove anonymous users? [Y/n] y
 ... Success!

Normally, root should only be allowed to connect from 'localhost'. This
ensures that someone cannot guess at the root password from the network.

Disallow root login remotely? [Y/n] y
 ... Success!

By default, MariaDB comes with a database named 'test' that anyone can
access. This is also intended only for testing, and should be removed
before moving into a production environment.

Remove test database and access to it? [Y/n] y
 - Dropping test database...
 ... Success!
 - Removing privileges on test database...
 ... Success!

Reloading the privilege tables will ensure that all changes made so far
will take effect immediately.

Reload privilege tables now? [Y/n] y
 ... Success!

Cleaning up...

All done! If you've completed all of the above steps, your MariaDB
installation should now be secure.

Thanks for using MariaDB!

[root@control4 my.cnf.d]# mysql -uroot -p
Enter password:
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 11
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
+--------------------+
3 rows in set (0.00 sec)

MariaDB [(none)]> exit
Bye
3. Install RabbitMQ
yum install rabbitmq-server -y
#Start the service
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
#Add the openstack user
[root@control4 ~]# rabbitmqctl add_user openstack hotdoor
Creating user "openstack"
#Grant the openstack user configure, write, and read access
[root@control4 ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/"
#Tag the user as administrator
[root@control4 ~]# rabbitmqctl set_user_tags openstack administrator
Setting tags for user "openstack" to [administrator]
[root@control4 ~]# systemctl restart rabbitmq-server
#Enable the management plugin web UI
[root@control4 ~]# rabbitmq-plugins enable rabbitmq_management
The following plugins have been enabled:
  amqp_client
  cowlib
  cowboy
  rabbitmq_web_dispatch
  rabbitmq_management_agent
  rabbitmq_management

Applying plugin configuration to rabbit@control4... started 6 plugins.
[root@control4 ~]# systemctl restart rabbitmq-server
#Check that the port is open
[root@control4 ~]# lsof -i:15672
COMMAND    PID     USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
beam.smp 43523 rabbitmq  54u  IPv4 147406      0t0  TCP *:15672 (LISTEN)
#Access the RabbitMQ management UI in a browser (it listens on port 15672)
Log in to test it. If you cannot connect, the usual cause is that the firewall is blocking the port; check this yourself.
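For example, one way to open the management port without disabling the firewall entirely (a sketch; adapt it to whichever firewall is actually active on your host):

#Allow the management UI port through iptables
iptables -A INPUT -p tcp --dport 15672 -j ACCEPT
#Or, if firewalld is managing the rules instead:
firewall-cmd --permanent --add-port=15672/tcp && firewall-cmd --reload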
4. Install memcached
[root@control4 ~]# yum install memcached python-memcached -y
[root@control4 ~]# cp /etc/sysconfig/memcached /etc/sysconfig/memcached.bak
[root@control4 ~]# ll /etc/sysconfig/memcached*
-rw-r--r--. 1 root root 87 Nov 2 2016 /etc/sysconfig/memcached
-rw-r--r--. 1 root root 87 Aug 18 12:03 /etc/sysconfig/memcached.bak
[root@control4 ~]# vi /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 10.0.0.17,::1"
#Start the service
[root@control4 ~]# systemctl enable memcached.service
Created symlink /etc/systemd/system/multi-user.target.wants/memcached.service → /usr/lib/systemd/system/memcached.service.
[root@control4 ~]# systemctl start memcached.service
#Check that the service is listening
[root@control4 ~]# ss -lntup|grep memcached
udp  UNCONN  0  0    10.0.0.17:11211  *:*    users:(("memcached",pid=44897,fd=28))
udp  UNCONN  0  0    ::1:11211        :::*   users:(("memcached",pid=44897,fd=29))
tcp  LISTEN  0  128  10.0.0.17:11211  *:*    users:(("memcached",pid=44897,fd=26))
tcp  LISTEN  0  128  ::1:11211        :::*   users:(("memcached",pid=44897,fd=27))
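A quick functional probe of memcached, assuming nc (ncat) is available on the host:

#Ask for stats over TCP; any reply means memcached is serving on the management address
printf 'stats\nquit\n' | nc 10.0.0.17 11211 | head -5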
6. Installing the Identity service (Keystone)
1. Create the keystone database
mysql -u root -p

CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
  IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
  IDENTIFIED BY '123456';
#Check the newly added users
MariaDB [mysql]> select user,host from user;
+----------+-----------+
| user     | host      |
+----------+-----------+
| keystone | %         |
| root     | 127.0.0.1 |
| root     | ::1       |
| keystone | localhost |
| root     | localhost |
+----------+-----------+
5 rows in set (0.00 sec)
2. Install openstack-keystone
yum install openstack-keystone httpd mod_wsgi -y
#Edit the configuration file
cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
[root@control4 ~]# vi /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:123456@controller/keystone

[token]
provider = fernet
#Sync the database and initialize the keys
su -s /bin/sh -c "keystone-manage db_sync" keystone keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
#Bootstrap the Identity service
keystone-manage bootstrap --bootstrap-password 123456 \
  --bootstrap-admin-url http://controller:35357/v3/ \
  --bootstrap-internal-url http://controller:35357/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne
#Configure Apache
cp /etc/httpd/conf/httpd.conf /etc/httpd/conf/httpd.conf.bak
vi /etc/httpd/conf/httpd.conf
ServerName controller
#Create the symlink
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
#Start the service and check it
[root@control4 ~]# systemctl enable httpd.service
Created symlink /etc/systemd/system/multi-user.target.wants/httpd.service → /usr/lib/systemd/system/httpd.service.
[root@control4 ~]# systemctl start httpd.service
[root@control4 ~]# lsof -i:80
COMMAND   PID   USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
httpd   45766   root    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45777 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45778 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45779 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45781 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
httpd   45786 apache    4u  IPv6 149718      0t0  TCP *:http (LISTEN)
#Set the environment variables
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
3. Create a domain, projects, users, and roles
[root@control4 ~]# openstack project create --domain default \
> --description "Service Project" service
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Service Project                  |
| domain_id   | default                          |
| enabled     | True                             |
| id          | 4d53bb5db064416ba02284862df47c00 |
| is_domain   | False                            |
| name        | service                          |
| parent_id   | default                          |
+-------------+----------------------------------+
[root@control4 ~]# openstack project create --domain default \
> --description "Demo Project" demo
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Demo Project                     |
| domain_id   | default                          |
| enabled     | True                             |
| id          | 03cc1f27dd3c42d4a5bf599432a5eefe |
| is_domain   | False                            |
| name        | demo                             |
| parent_id   | default                          |
+-------------+----------------------------------+
[root@control4 ~]# openstack user create --domain default \
> --password-prompt demo
User Password:
Repeat User Password:    #enter the password
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | 6d74d170ba314f7696c807bb2ce02ca2 |
| name                | demo                             |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@control4 ~]# openstack role create user
+-----------+----------------------------------+
| Field     | Value                            |
+-----------+----------------------------------+
| domain_id | None                             |
| id        | 2981e73e3c35458b81496dcf247741cf |
| name      | user                             |
+-----------+----------------------------------+
[root@control4 ~]# openstack role add --project demo --user demo user
4. Verify operation
#For security reasons, disable the temporary authentication token mechanism:
vi /etc/keystone/keystone-paste.ini
[pipeline:public_api]
Remove admin_token_auth from the pipeline.
[pipeline:admin_api]
Remove admin_token_auth from the pipeline.
[pipeline:api_v3]
Remove admin_token_auth from the pipeline.
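Instead of editing by hand, a one-liner can strip the filter from all three pipelines (a convenience sketch; inspect the file afterwards):

sed -i 's/ admin_token_auth//g' /etc/keystone/keystone-paste.ini
grep -n admin_token_auth /etc/keystone/keystone-paste.ini    #only the [filter:admin_token_auth] definition should remain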
#Unset the temporary OS_AUTH_URL and OS_PASSWORD environment variables:
unset OS_AUTH_URL OS_PASSWORD
#This step prompts for the password
[root@control4 ~]# openstack --os-auth-url http://controller:35357/v3 \
> --os-project-domain-name Default --os-user-domain-name Default \
> --os-project-name admin --os-username admin token issue
Password:
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2017-08-18 06:27:42+00:00        |
| id         | 1a839e5dc8cb42e9b9061416e4c135be |
| project_id | f02a7283e7574e538aee9f0763780979 |
| user_id    | 9afda0f2d36e445695b2717e676d0548 |
+------------+----------------------------------+
#This step also prompts for the password
[root@control4 ~]# openstack --os-auth-url http://controller:5000/v3 \
> --os-project-domain-name Default --os-user-domain-name Default \
> --os-project-name demo --os-username demo token issue
Password:
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2017-08-18 06:29:35+00:00        |
| id         | 7c4ec617883a406cb98e27d1b18cc6d6 |
| project_id | 03cc1f27dd3c42d4a5bf599432a5eefe |
| user_id    | 6d74d170ba314f7696c807bb2ce02ca2 |
+------------+----------------------------------+
5. Create OpenStack client environment scripts
[root@control4 ~]# vi admin
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@control4 ~]# vi demo
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=123456
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
#Use the scripts
[root@control4 ~]# source admin
[root@control4 ~]# openstack token issue
+------------+----------------------------------+
| Field      | Value                            |
+------------+----------------------------------+
| expires    | 2017-08-18 06:39:40+00:00        |
| id         | 7fb684b09abc405699936998636bf58e |
| project_id | f02a7283e7574e538aee9f0763780979 |
| user_id    | 9afda0f2d36e445695b2717e676d0548 |
+------------+----------------------------------+
7. Installing and configuring Glance
1. Create the database
[root@control4 ~]# mysql -u root -p
Enter password:
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 22
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE glance;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
    IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
    IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)
exit    #leave the database shell
2. Create the glance user
[root@control4 ~]# source admin
[root@control4 ~]# openstack user create --domain default --password-prompt glance
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | e403ada42383417aaf9f74d0e02ae5f1 |
| name                | glance                           |
| password_expires_at | None                             |
+---------------------+----------------------------------+
3. Add the admin role to the glance user in the service project
[root@control4 ~]# openstack role add --project service --user glance admin
4. Create the glance service entity and endpoints
[root@control4 ~]# openstack service create --name glance \
> --description "OpenStack Image" image
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Image                  |
| enabled     | True                             |
| id          | 7f2abaeca7f44539a7d31bcbb01666b9 |
| name        | glance                           |
| type        | image                            |
+-------------+----------------------------------+

[root@control4 ~]# openstack endpoint create --region RegionOne \
> image public http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 817e69c34b73446db70576c6bd69e700 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 7f2abaeca7f44539a7d31bcbb01666b9 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

[root@control4 ~]# openstack endpoint create --region RegionOne \
> image internal http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | c6a8a58436a04c488c462b20cc440f28 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 7f2abaeca7f44539a7d31bcbb01666b9 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

[root@control4 ~]# openstack endpoint create --region RegionOne \
> image admin http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | d9cb57549ea945748caa96faa14b3cda |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 7f2abaeca7f44539a7d31bcbb01666b9 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+
5. Install and configure the Glance packages
[root@control4 ~]# yum install openstack-glance
#Back up the configuration file
[root@control4 ~]# cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
[root@control4 ~]# ll -ld /etc/glance/glance-api.conf*
-rw-r-----. 1 root glance 140377 Oct  6  2016 /etc/glance/glance-api.conf
-rw-r-----. 1 root root   140377 Aug 18 14:01 /etc/glance/glance-api.conf.bak
#Edit the configuration file
#Set the hostname first
[root@control4 ~]# hostnamectl set-hostname controller
[root@control4 ~]# hostname
controller

[root@control4 ~]# vi /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:hotdoor#899@controller/glance    #note: the host after @ is the controller's hostname

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = hotdoor#899

[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
6. Edit glance-registry.conf
[root@control4 ~]# cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak
[root@control4 ~]# vi /etc/glance/glance-registry.conf
[database]
connection = mysql+pymysql://glance:hotdoor#899@controller/glance

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = hotdoor#899

[paste_deploy]
flavor = keystone
#Sync the database (this prints deprecation warnings; they can be ignored)
[root@control4 ~]# su -s /bin/sh -c "glance-manage db_sync" glance
Option "verbose" from group "DEFAULT" is deprecated for removal. Its value may be silently ignored in the future.
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:1171: OsloDBDeprecationWarning: EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade
  expire_on_commit=expire_on_commit, _conf=conf)
/usr/lib/python2.7/site-packages/pymysql/cursors.py:166: Warning: (1831, u'Duplicate index `ix_image_properties_image_id_name`. This is deprecated and will be disallowed in a future release.')
  result = self._query(query)
#Start the services
[root@control4 ~]# systemctl enable openstack-glance-api.service \
> openstack-glance-registry.service
Created symlink /etc/systemd/system/multi-user.target.wants/openstack-glance-api.service → /usr/lib/systemd/system/openstack-glance-api.service.
Created symlink /etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service → /usr/lib/systemd/system/openstack-glance-registry.service.
[root@control4 ~]# systemctl start openstack-glance-api.service \
> openstack-glance-registry.service
7. Verify the service
#Download a test image
wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
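Optionally verify the download before uploading it; the checksum Glance reports for this image later in this section is ee1eca47dc88f4879d8a229cc70a07c6, and the local file should match:

md5sum cirros-0.3.4-x86_64-disk.img
#Expected: ee1eca47dc88f4879d8a229cc70a07c6  cirros-0.3.4-x86_64-disk.img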
#Check the downloaded file
[root@control4 ~]# ll
total 12992
-rw-r--r--. 1 root root      267 Aug 18 13:32 admin
-rw-------. 1 root root     1777 Aug 17 20:00 anaconda-ks.cfg
-rw-r--r--. 1 root root 13287936 May  8  2015 cirros-0.3.4-x86_64-disk.img
-rw-r--r--. 1 root root      264 Aug 18 13:33 demo
#Verify operation
[root@control4 ~]# source admin
[root@control4 ~]# openstack image create "cirros" \
> --file cirros-0.3.4-x86_64-disk.img \
> --disk-format qcow2 --container-format bare \
> --public
+------------------+------------------------------------------------------+
| Field            | Value                                                |
+------------------+------------------------------------------------------+
| checksum         | ee1eca47dc88f4879d8a229cc70a07c6                     |
| container_format | bare                                                 |
| created_at       | 2017-08-18T06:32:49Z                                 |
| disk_format      | qcow2                                                |
| file             | /v2/images/6a4ba2ce-ba0a-4e8e-a9dc-efb8c72b7863/file |
| id               | 6a4ba2ce-ba0a-4e8e-a9dc-efb8c72b7863                 |
| min_disk         | 0                                                    |
| min_ram          | 0                                                    |
| name             | cirros                                               |
| owner            | f02a7283e7574e538aee9f0763780979                     |
| protected        | False                                                |
| schema           | /v2/schemas/image                                    |
| size             | 13287936                                             |
| status           | active                                               |
| tags             |                                                      |
| updated_at       | 2017-08-18T06:32:49Z                                 |
| virtual_size     | None                                                 |
| visibility       | public                                               |
+------------------+------------------------------------------------------+
#List images
[root@control4 ~]# openstack image list
+--------------------------------------+--------+--------+
| ID                                   | Name   | Status |
+--------------------------------------+--------+--------+
| 6a4ba2ce-ba0a-4e8e-a9dc-efb8c72b7863 | cirros | active |
+--------------------------------------+--------+--------+
8. Installing and configuring Nova on the controller node
This section describes how to install and configure the Compute service, code-named Nova, on the controller node.
1. Create the databases
[root@control4 ~]# mysql -uroot -p
Enter password:
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 31
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE nova_api;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> CREATE DATABASE nova;
Query OK, 1 row affected (0.00 sec)

MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| glance             |
| information_schema |
| keystone           |
| mysql              |
| nova               |
| nova_api           |
| performance_schema |
+--------------------+
7 rows in set (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
    -> IDENTIFIED BY 'hotdoor#899';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
    -> IDENTIFIED BY 'hotdoor#899';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
    -> IDENTIFIED BY 'hotdoor#899';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
    -> IDENTIFIED BY 'hotdoor#899';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)
exit    #leave the database shell
2. Create the nova user
[root@control4 ~]# openstack user create --domain default \
> --password-prompt nova
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | baef83a42538487390a525b3f1a9793b |
| name                | nova                             |
| password_expires_at | None                             |
+---------------------+----------------------------------+
3. Add the admin role to the nova user, then create the compute service and endpoints
[root@control4 ~]# openstack role add --project service --user nova admin
[root@control4 ~]# openstack service create --name nova \
> --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Compute                |
| enabled     | True                             |
| id          | f31c3500c136482fbb962c01feda5350 |
| name        | nova                             |
| type        | compute                          |
+-------------+----------------------------------+

[root@control4 ~]# openstack endpoint create --region RegionOne \
> compute public http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | be0735691ed540249fff9c6c1baf8355          |
| interface    | public                                    |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | f31c3500c136482fbb962c01feda5350          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+

[root@control4 ~]# openstack endpoint create --region RegionOne \
> compute internal http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | 06dcbfc21fbd45a0bcc6fe74b215ffa8          |
| interface    | internal                                  |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | f31c3500c136482fbb962c01feda5350          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+

[root@control4 ~]# openstack endpoint create --region RegionOne \
> compute admin http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field        | Value                                     |
+--------------+-------------------------------------------+
| enabled      | True                                      |
| id           | 72645b92a9284611bf699a90f12630ae          |
| interface    | admin                                     |
| region       | RegionOne                                 |
| region_id    | RegionOne                                 |
| service_id   | f31c3500c136482fbb962c01feda5350          |
| service_name | nova                                      |
| service_type | compute                                   |
| url          | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
4. Install the Nova packages
yum install openstack-nova-api openstack-nova-conductor \
  openstack-nova-console openstack-nova-novncproxy \
  openstack-nova-scheduler
5. Edit the configuration file
[root@control4 nova]# cat nova.conf|grep -v "^#"|grep -v "^$"
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hotdoor@controller
auth_strategy = keystone
my_ip = 10.0.0.17
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:hotdoor@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[cors]
[cors.subdomain]
[crypto]
[database]
connection = mysql+pymysql://nova:hotdoor@controller/nova
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hotdoor#899
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[placement]
[placement_database]
[rdp]
[remote_debug]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
vncserver_listen = 10.0.0.17
vncserver_proxyclient_address = 10.0.0.17
[workarounds]
[wsgi]
[xenserver]
[xvp]
6. Sync the databases
su -s /bin/sh -c "nova-manage api_db sync" nova su -s /bin/sh -c "nova-manage db sync" nova
7. Start the services and enable them at boot
systemctl enable openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
#Problems encountered
Pitfall 1: the RabbitMQ password must not contain special characters, or the sync below fails. (Remember: when building OpenStack, never use passwords containing special characters.)
[root@control4 nova]# su -s /bin/sh -c "nova-manage api_db sync" nova
Traceback (most recent call last):
  File "/usr/bin/nova-manage", line 10, in <module>
    sys.exit(main())
  File "/usr/lib/python2.7/site-packages/nova/cmd/manage.py", line 1580, in main
    config.parse_args(sys.argv)
  File "/usr/lib/python2.7/site-packages/nova/config.py", line 50, in parse_args
    rpc.init(CONF)
  File "/usr/lib/python2.7/site-packages/nova/rpc.py", line 76, in init
    aliases=TRANSPORT_ALIASES)
  File "/usr/lib/python2.7/site-packages/oslo_messaging/transport.py", line 182, in get_transport
    url = TransportURL.parse(conf, url, aliases)
  File "/usr/lib/python2.7/site-packages/oslo_messaging/transport.py", line 459, in parse
    port = int(port)
ValueError: invalid literal for int() with base 10: 'hotdoor'
#Solution
The RabbitMQ password must not contain special characters, or you get the error above. The fix is to change the RabbitMQ password.
transport_url = rabbit://openstack:hotdoor#899@controller    #this password cost me hours; never use special characters here
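The root cause is that transport_url is parsed as a URL: the # starts a URL fragment, so the @controller part is discarded and openstack:hotdoor is parsed as host:port, hence the int('hotdoor') failure in the traceback above. Besides changing the password, percent-encoding the offending character is an option that URL parsing generally accepts (a sketch; verify it against your release before relying on it):

#'#' percent-encodes to %23
transport_url = rabbit://openstack:hotdoor%23899@controller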
#How to change the RabbitMQ password
[root@control4 nova]# rabbitmqctl change_password openstack hotdoor
Error: unable to connect to node rabbit@control4: nodedown

DIAGNOSTICS
===========

attempted to contact: [rabbit@control4]

rabbit@control4:
  * unable to connect to epmd (port 4369) on control4: nxdomain (non-existing domain)

current node details:
- node name: 'rabbitmq-cli-94@controller'
- home dir: /var/lib/rabbitmq
- cookie hash: fFocvmbatbiHNvfPZ4D/Yw==

(This nodedown is most likely because the hostname was changed to controller earlier: rabbitmqctl still targets rabbit@control4, and the short name control4 no longer resolves. Mapping control4 in /etc/hosts, or restarting rabbitmq-server so it comes up under the new hostname, gets rabbitmqctl working again.)
#After changing the password, remember to update it in the config file
[root@control4 ~]# vi /etc/nova/nova.conf
transport_url = rabbit://openstack:hotdoor@controller    #remember to update this password
Pitfall 2: the following warning is normal; just ignore it.
[root@control4 nova]# su -s /bin/sh -c "nova-manage db sync" nova
WARNING: cell0 mapping not found - not syncing cell0.
9. Installing and configuring a compute node
Note: in a multi-node cluster you can configure several compute nodes, separate from the controller node.
Reference: https://docs.openstack.org/newton/install-guide-rdo/nova-compute-install.html
#Deal with the following issue up front, or the compute node install will fail.
Pitfall 1: the installed iptables is too new; uninstall it and install the older version that iptables-services-1.6.0-2.fc25.x86_64 depends on.
[root@controller ~]# yum install openstack-nova-compute -y
Error: transaction check vs depsolve:
iptables = 1.6.0-2.fc25 is needed by iptables-services-1.6.0-2.fc25.x86_64
To diagnose the problem, try running: 'rpm -Va --nofiles --nodigest'.
You probably have corrupted RPMDB, running 'rpm --rebuilddb' might fix the issue.
The downloaded packages were saved in cache until the next successful transaction.
You can remove cached packages by executing 'dnf clean packages'.
[root@controller ~]# rpm -qa iptables*
iptables-1.6.0-3.fc25.x86_64
iptables-libs-1.6.0-3.fc25.x86_64
Solution:
#Check the installed version
[root@controller ~]# rpm -qa iptables*
iptables-1.6.0-3.fc25.x86_64
iptables-libs-1.6.0-3.fc25.x86_64
#Remove the current version
[root@controller nova]# rpm -e iptables --nodeps
[root@controller nova]# rpm -e iptables-libs --nodeps
[root@controller nova]# rpm -qa iptables*
#Install the version the error message asked for
#Install the dependency first
yum install iptables-libs-1.6.0-2.fc25.x86_64 -y
#Then iptables itself
yum install iptables-1.6.0-2.fc25.x86_64 -y
#Install the compute node packages; back up the original config file first, then edit it.
#Install the compute node packages
yum install openstack-nova-compute -y
#Back up the configuration file
[root@controller ~]# cp /etc/nova/nova.conf /etc/nova/nova.conf.01.bak
[root@controller ~]# ll -ld /etc/nova/nova.conf*
-rw-r-----. 1 root nova 290463 Aug 18 18:54 /etc/nova/nova.conf
-rw-r-----. 1 root root 290463 Aug 19 12:07 /etc/nova/nova.conf.01.bak    #the config completed in stage one
-rw-r-----. 1 root root 289748 Aug 18 15:32 /etc/nova/nova.conf.old       #the original config file
#Edit the configuration file
[root@controller ~]# vi /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hotdoor@controller
auth_strategy = keystone
my_ip = 10.0.0.17
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:hotdoor@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[cors]
[cors.subdomain]
[crypto]
[database]
connection = mysql+pymysql://nova:hotdoor@controller/nova
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hotdoor#899
[libvirt]
virt_type=kvm
[matchmaker_redis]
[metrics]
[mks]
[neutron]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[placement]
[placement_database]
[rdp]
[remote_debug]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.0.0.17
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
#Pitfall 2:
Note: if the nova-compute service fails to start, check /var/log/nova/nova-compute.log. The error message may indicate that the firewall on the controller node is blocking access to port 5672:
AMQP server on controller:5672 is unreachable.
Solution:
Add a firewall rule.
#Add an iptables rule for AMQP
iptables -A INPUT -p tcp --dport 5672 -j ACCEPT
iptables-save
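Note that iptables-save by itself only prints the ruleset to stdout; to make the rule survive a reboot it has to be written where the iptables service reloads it from (a sketch, assuming the iptables-services package set up earlier):

iptables-save > /etc/sysconfig/iptables    #the file iptables.service restores at boot
systemctl enable iptables.service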
#Check that the rule was added
[root@controller ~]# iptables -L
Chain INPUT (policy ACCEPT)
target          prot opt source    destination
ACCEPT          udp  --  anywhere  anywhere     udp dpt:domain
ACCEPT          tcp  --  anywhere  anywhere     tcp dpt:domain
ACCEPT          udp  --  anywhere  anywhere     udp dpt:bootps
ACCEPT          tcp  --  anywhere  anywhere     tcp dpt:bootps
nova-api-INPUT  all  --  anywhere  anywhere
ACCEPT          tcp  --  anywhere  anywhere     tcp dpt:amqp
#Start the services
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
#Note: inspect the service status carefully for anything highlighted in red (red output means something in the config file is wrong).
[root@controller ~]# systemctl status libvirtd.service openstack-nova-compute.service
● libvirtd.service - Virtualization daemon
   Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled)
   Active: active (running) since Sat 2017-08-19 12:52:51 CST; 7s ago
     Docs: man:libvirtd(8)
           http://libvirt.org
 Main PID: 16902 (libvirtd)
    Tasks: 18 (limit: 9830)
   CGroup: /system.slice/libvirtd.service
           ├─ 3192 /sbin/dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/default.conf --leasefile-ro --dhcp-sc
           ├─ 3193 /sbin/dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/default.conf --leasefile-ro --dhcp-sc
           └─16902 /usr/sbin/libvirtd

Aug 19 12:52:51 controller systemd[1]: Starting Virtualization daemon...
Aug 19 12:52:51 controller systemd[1]: Started Virtualization daemon.
Aug 19 12:52:51 controller dnsmasq[3192]: read /etc/hosts - 3 addresses
Aug 19 12:52:51 controller dnsmasq[3192]: read /var/lib/libvirt/dnsmasq/default.addnhosts - 0 addresses
Aug 19 12:52:51 controller dnsmasq-dhcp[3192]: read /var/lib/libvirt/dnsmasq/default.hostsfile

● openstack-nova-compute.service - OpenStack Nova Compute Server
   Loaded: loaded (/usr/lib/systemd/system/openstack-nova-compute.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat 2017-08-19 12:52:54 CST; 4s ago
 Main PID: 16921 (nova-compute)
    Tasks: 22 (limit: 9830)
   CGroup: /system.slice/openstack-nova-compute.service
           └─16921 /usr/bin/python2 /usr/bin/nova-compute

Aug 19 12:52:51 controller systemd[1]: Starting OpenStack Nova Compute Server...
Aug 19 12:52:54 controller systemd[1]: Started OpenStack Nova Compute Server.
#Verify operation
[root@controller ~]# source admin
[root@controller ~]# openstack compute service list
+----+------------------+------------+----------+---------+-------+----------------------------+
| ID | Binary           | Host       | Zone     | Status  | State | Updated At                 |
+----+------------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-consoleauth | controller | internal | enabled | up    | 2017-08-19T05:00:45.000000 |
|  2 | nova-conductor   | controller | internal | enabled | up    | 2017-08-19T05:00:47.000000 |
|  6 | nova-scheduler   | controller | internal | enabled | up    | 2017-08-19T05:00:42.000000 |
| 14 | nova-compute     | controller | nova     | enabled | up    | 2017-08-19T05:00:40.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+
10. Installing and configuring Neutron on the controller node (Networking service)
Reference: https://docs.openstack.org/newton/install-guide-rdo/neutron-controller-install.html
1. Networking service overview
OpenStack Networking (neutron) is one of the core OpenStack components; it provides virtual networking for the cloud environment.
2. Networking service components
neutron-server: this part comprises the neutron-server daemon and the various neutron-*-plugin plugins, which can be installed on either the controller node or a network node. neutron-server exposes the API and hands API calls to the configured plugins for further processing. The plugins access the database to maintain configuration data and relationships: routers, networks, subnets, ports, floating IPs, security groups, and so on.
OpenStack networking plug-ins and agents: Plug and unplug ports, create networks or subnets, and provide IP addressing. These plug-ins and agents differ depending on the vendor and technologies used in the particular cloud. OpenStack Networking ships with plug-ins and agents for Cisco virtual and physical switches, NEC OpenFlow products, Open vSwitch, Linux bridging, and the VMware NSX product.
Common agents are the L3 agent (layer 3), the DHCP agent (dynamic host IP addressing), and the plugin agents.
Plug-ins and agents in detail:
Plugin Agent (neutron-*-agent): handles the processing of packets on the virtual network; runs on every compute node and network node. Which agent you need generally follows from which plugin you chose. Agents talk to the Neutron server and its plugin over the message queue.
Tip: a plugin agent must be deployed on every host that runs a hypervisor; it applies the local vSwitch configuration, and the specifics depend on the plugin in use. (The most common plugin is Open vSwitch; others include Big Switch, Floodlight REST Proxy, Brocade, NSX, PLUMgrid, and Ryu.)
DHCP Agent (neutron-dhcp-agent): provides DHCP service to tenant networks and is deployed on the network node; all plugins share this one agent. Its main job is to dynamically assign IP addresses to VMs on tenant networks.
L3 Agent (neutron-l3-agent): provides layer-3 forwarding so guests can reach external networks; also deployed on the network node. It supplies layer-3 routing and network address translation (NAT) so tenant VMs can communicate with the outside world.
(Figure: the relationships between the Neutron components and the running neutron service processes; diagram omitted.)
Message queue: used by most OpenStack Networking installations to route information between neutron-server and the various agents; for certain plugins it also plays the role of a database for storing network state.
3. Install and configure the controller node
1. Prerequisites
Create the database and grant privileges:
[root@controller ~]# mysql -uroot -p
Enter password:
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 762
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE neutron;
Query OK, 1 row affected (0.01 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
    -> IDENTIFIED BY 'hotdoor';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
    -> IDENTIFIED BY 'hotdoor';
Query OK, 0 rows affected (0.00 sec)

MariaDB [(none)]> exit
Bye
2. Source the admin credentials
source admin
3. Create the neutron user and add the admin role
[root@controller ~]# source admin
[root@controller ~]# openstack user create --domain default --password-prompt neutron
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | ef2ecdc2aa5442c083e22dd05149f7b2 |
| name                | neutron                          |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@controller ~]# openstack role add --project service --user neutron admin
[root@controller ~]# openstack service create --name neutron \
> --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Networking             |
| enabled     | True                             |
| id          | d4beb86cafd845968641bb85876cb255 |
| name        | neutron                          |
| type        | network                          |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> network public http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 7147205d0858453fbce949213d71ac6b |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d4beb86cafd845968641bb85876cb255 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne \
> network internal http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 4bf1515393574b54962faa0d861ba676 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d4beb86cafd845968641bb85876cb255 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+

[root@controller ~]# openstack endpoint create --region RegionOne \
> network admin http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 2ee404d255b34d5d89b1a6bfb7cff3ce |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | d4beb86cafd845968641bb85876cb255 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
4. Install the packages
[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 \
> openstack-neutron-linuxbridge ebtables -y
#Watch out for a pitfall here: the first run can finish without actually installing anything. Run the command a second time for the install to succeed; otherwise the configuration files will be missing.
Installed:
  conntrack-tools.x86_64 1.4.3-1.fc25
  dibbler-client.x86_64 1.0.1-3.fc25
  dnsmasq-utils.x86_64 2.76-2.fc25
  keepalived.x86_64 1.3.5-1.fc25
  libnetfilter_cthelper.x86_64 1.0.0-9.fc24
  libnetfilter_cttimeout.x86_64 1.0.0-7.fc24
  libnetfilter_queue.x86_64 1.0.2-7.fc24
  libsodium.x86_64 1.0.13-1.fc25
  libxslt-python.x86_64 1.1.28-13.fc25
  lm_sensors-libs.x86_64 3.4.0-5.fc25
  net-snmp-agent-libs.x86_64 1:5.7.3-15.fc25
  openpgm.x86_64 5.2.122-6.fc24
  openstack-neutron.noarch 1:9.4.0-1.el7
  openstack-neutron-common.noarch 1:9.4.0-1.el7
  openstack-neutron-linuxbridge.noarch 1:9.4.0-1.el7
  openstack-neutron-ml2.noarch 1:9.4.0-1.el7
  python-libxml2.x86_64 2.9.4-2.fc25
  python-logutils.noarch 0.3.3-7.fc25
  python-ncclient.noarch 0.4.7-2.fc25
  python-neutron.noarch 1:9.4.0-1.el7
  python-neutron-lib.noarch 0.4.0-1.el7
  python-openvswitch.noarch 1:2.6.1-4.1.git20161206.el7
  python-ryu-common.noarch 4.9-2.el7
  python-webtest.noarch 2.0.23-1.fc25
  python-werkzeug.noarch 0.11.10-2.fc25
  python2-designateclient.noarch 2.3.0-1.el7
  python2-gevent.x86_64 1.1.2-2.el7
  python2-pecan.noarch 1.1.2-1.fc25
  python2-ryu.noarch 4.9-2.el7
  python2-tinyrpc.noarch 0.5-4.20170523git1f38ac.el7
  python2-waitress.noarch 0.9.0-4.fc25
  python2-zmq.x86_64 15.3.0-2.fc25
  zeromq.x86_64 4.1.4-5.fc25

Complete!    #only when you see this has the install actually succeeded
5. Configure the service components
Note: the default configuration files may differ between distributions. You may need to add these sections and options rather than modify existing ones. Also, an ellipsis (...) in a configuration snippet indicates default options that you should keep.
There are two networking options to choose from; we pick option 2 here because it supports VXLAN.
Configuration reference: https://docs.openstack.org/newton/install-guide-rdo/neutron-controller-install-option2.html
1. Back up the configuration file, then edit /etc/neutron/neutron.conf
[root@controller ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.old
#Edit the configuration file
[root@controller ~]# vi /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
transport_url = rabbit://openstack:hotdoor@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
[agent]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://neutron:hotdoor@controller/neutron
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = neutron
password = hotdoor#899
[matchmaker_redis]
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = Default
user_domain_name = Default
region_name = RegionOne
project_name = service
username = nova
password = hotdoor
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[qos]
[quotas]
[ssl]
2. Configure the Modular Layer 2 (ML2) plug-in
[root@controller ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.old
[root@controller ~]# vi /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = True
3. Configure the Linux bridge agent
Note: the Linux bridge agent builds layer-2 virtual networks for instances and handles security group rules.
Edit the configuration file /etc/neutron/plugins/ml2/linuxbridge_agent.ini.
In the [linux_bridge] section, map the provider virtual network to the provider physical network interface:
[linux_bridge]
physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
Replace PROVIDER_INTERFACE_NAME with the name of the underlying provider physical network interface. See the environment networking section of the install guide for more information.
Simply put, it is the physical NIC that provides external (Internet) access.
[root@controller ~]# ifconfig
# The physical NIC with external access; here we use eno1
eno1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.100.210  netmask 255.255.255.0  broadcast 10.0.100.255
        inet6 fe80::55d6:7864:68db:e9e5  prefixlen 64  scopeid 0x20<link>
        ether 2c:59:e5:47:a8:c8  txqueuelen 1000  (Ethernet)
        RX packets 13005  bytes 9435494 (8.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 4556  bytes 448987 (438.4 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
        device memory 0xf5e00000-f5efffff

eno2: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.17  netmask 255.255.255.0  broadcast 10.0.0.255
        inet6 fe80::8ce1:1156:c667:b371  prefixlen 64  scopeid 0x20<link>
        ether 2c:59:e5:47:a8:c9  txqueuelen 1000  (Ethernet)
        RX packets 4870  bytes 463595 (452.7 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 10  bytes 716 (716.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
        device memory 0xf5c00000-f5cfffff
[root@controller ml2]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.old
[root@controller ml2]# vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eno1   # eno1: the interface on the externally routable network
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = True
local_ip = 10.0.0.17   # the internal (management/overlay) network address
l2_population = True
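Before starting the agent it can help to confirm the kernel side is ready; a hedged sanity check (module and sysctl names are standard, but availability can vary by kernel build):

# Confirm the kernel provides VXLAN support
modinfo vxlan >/dev/null 2>&1 && echo "vxlan: OK"
# Bridged traffic must pass through iptables for security groups to work;
# both values should be 1 (load the br_netfilter module first if they are absent)
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables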
4. Configure the layer-3 agent
[root@controller ~]# cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.old
[root@controller ~]# vi /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
[AGENT]
5. Configure the DHCP agent
[root@controller ~]# cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.old
[root@controller ~]# vi /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
[AGENT]
# Configuration done; return to the controller node page of the install guide.
6、Configure the metadata agent
[root@controller ~]# cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.old
[root@controller ~]# vi /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = METADATA_SECRET
[AGENT]
[cache]
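METADATA_SECRET is a placeholder: choose a real shared secret and use the same value in nova.conf in step 7 below. The install guide suggests generating one, for example:

# Generate a random shared secret for metadata_proxy_shared_secret
openssl rand -hex 10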
7、Configure the Compute service to use the Networking service
# After editing, review the effective settings: cat /etc/nova/nova.conf|grep -v "^#"|grep -v "^$"
vi /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:hotdoor@controller
auth_strategy = keystone
my_ip = 10.0.0.17
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:hotdoor@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[cloudpipe]
[conductor]
[cors]
[cors.subdomain]
[crypto]
[database]
connection = mysql+pymysql://nova:hotdoor@controller/nova
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = hotdoor#899
[libvirt]
virt_type=kvm
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = Default
user_domain_name = Default
region_name = RegionOne
project_name = service
username = neutron
password = hotdoor
service_metadata_proxy = True
metadata_proxy_shared_secret = METADATA_SECRET
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[placement]
[placement_database]
[rdp]
[remote_debug]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = 10.0.0.17
novncproxy_base_url = http://controller:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
Configuration of this component is complete.
8. The Networking service initialization scripts expect a symbolic link /etc/neutron/plugin.ini pointing to the ML2 plug-in configuration file /etc/neutron/plugins/ml2/ml2_conf.ini. If the symbolic link does not exist, create it with the following command:
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
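A quick check that the link exists and points at the ML2 file:

ls -l /etc/neutron/plugin.ini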
9. Populate (sync) the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \ --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
# Verify the sync succeeded: the neutron database should now contain tables
[root@controller ~]# mysql -uneutron -p
Enter password:
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 104
Server version: 10.1.25-MariaDB MariaDB Server

Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [neutron]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| neutron            |
+--------------------+
2 rows in set (0.00 sec)

MariaDB [neutron]> use neutron
Database changed
MariaDB [neutron]> show tables;
+-----------------------------------------+
| Tables_in_neutron                       |
+-----------------------------------------+
| address_scopes                          |
| agents                                  |
| alembic_version                         |
| allowedaddresspairs                     |
| arista_provisioned_nets                 |
| arista_provisioned_tenants              |
| arista_provisioned_vms                  |
| auto_allocated_topologies               |
| bgp_peers                               |
| bgp_speaker_dragent_bindings            |
| bgp_speaker_network_bindings            |
| bgp_speaker_peer_bindings               |
| bgp_speakers                            |
| brocadenetworks                         |
| brocadeports                            |
| cisco_csr_identifier_map                |
| cisco_hosting_devices                   |
| cisco_ml2_apic_contracts                |
| cisco_ml2_apic_host_links               |
| cisco_ml2_apic_names                    |
| cisco_ml2_n1kv_network_bindings         |
| cisco_ml2_n1kv_network_profiles         |
| cisco_ml2_n1kv_policy_profiles          |
| cisco_ml2_n1kv_port_bindings            |
| cisco_ml2_n1kv_profile_bindings         |
| cisco_ml2_n1kv_vlan_allocations         |
| cisco_ml2_n1kv_vxlan_allocations        |
| cisco_ml2_nexus_nve                     |
| cisco_ml2_nexusport_bindings            |
| cisco_port_mappings                     |
| cisco_router_mappings                   |
| consistencyhashes                       |
| default_security_group                  |
| dnsnameservers                          |
| dvr_host_macs                           |
| externalnetworks                        |
| extradhcpopts                           |
| firewall_policies                       |
| firewall_rules                          |
| firewalls                               |
| flavors                                 |
| flavorserviceprofilebindings            |
| floatingipdnses                         |
| floatingips                             |
| ha_router_agent_port_bindings           |
| ha_router_networks                      |
| ha_router_vrid_allocations              |
| healthmonitors                          |
| ikepolicies                             |
| ipallocationpools                       |
| ipallocations                           |
| ipamallocationpools                     |
| ipamallocations                         |
| ipamsubnets                             |
| ipsec_site_connections                  |
| ipsecpeercidrs                          |
| ipsecpolicies                           |
| lsn                                     |
| lsn_port                                |
| maclearningstates                       |
| members                                 |
| meteringlabelrules                      |
| meteringlabels                          |
| ml2_brocadenetworks                     |
| ml2_brocadeports                        |
| ml2_distributed_port_bindings           |
| ml2_flat_allocations                    |
| ml2_geneve_allocations                  |
| ml2_geneve_endpoints                    |
| ml2_gre_allocations                     |
| ml2_gre_endpoints                       |
| ml2_nexus_vxlan_allocations             |
| ml2_nexus_vxlan_mcast_groups            |
| ml2_port_binding_levels                 |
| ml2_port_bindings                       |
| ml2_ucsm_port_profiles                  |
| ml2_vlan_allocations                    |
| ml2_vxlan_allocations                   |
| ml2_vxlan_endpoints                     |
| multi_provider_networks                 |
| networkconnections                      |
| networkdhcpagentbindings                |
| networkdnsdomains                       |
| networkgatewaydevicereferences          |
| networkgatewaydevices                   |
| networkgateways                         |
| networkqueuemappings                    |
| networkrbacs                            |
| networks                                |
| networksecuritybindings                 |
| networksegments                         |
| neutron_nsx_network_mappings            |
| neutron_nsx_port_mappings               |
| neutron_nsx_router_mappings             |
| neutron_nsx_security_group_mappings     |
| nexthops                                |
| nsxv_edge_dhcp_static_bindings          |
| nsxv_edge_vnic_bindings                 |
| nsxv_firewall_rule_bindings             |
| nsxv_internal_edges                     |
| nsxv_internal_networks                  |
| nsxv_port_index_mappings                |
| nsxv_port_vnic_mappings                 |
| nsxv_router_bindings                    |
| nsxv_router_ext_attributes              |
| nsxv_rule_mappings                      |
| nsxv_security_group_section_mappings    |
| nsxv_spoofguard_policy_network_mappings |
| nsxv_tz_network_bindings                |
| nsxv_vdr_dhcp_bindings                  |
| nuage_net_partition_router_mapping      |
| nuage_net_partitions                    |
| nuage_provider_net_bindings             |
| nuage_subnet_l2dom_mapping              |
| poolloadbalanceragentbindings           |
| poolmonitorassociations                 |
| pools                                   |
| poolstatisticss                         |
| portbindingports                        |
| portdnses                               |
| portqueuemappings                       |
| ports                                   |
| portsecuritybindings                    |
| providerresourceassociations            |
| provisioningblocks                      |
| qos_bandwidth_limit_rules               |
| qos_dscp_marking_rules                  |
| qos_minimum_bandwidth_rules             |
| qos_network_policy_bindings             |
| qos_policies                            |
| qos_port_policy_bindings                |
| qospolicyrbacs                          |
| qosqueues                               |
| quotas                                  |
| quotausages                             |
| reservations                            |
| resourcedeltas                          |
| router_extra_attributes                 |
| routerl3agentbindings                   |
| routerports                             |
| routerroutes                            |
| routerrules                             |
| routers                                 |
| securitygroupportbindings               |
| securitygrouprules                      |
| securitygroups                          |
| segmenthostmappings                     |
| serviceprofiles                         |
| sessionpersistences                     |
| standardattributes                      |
| subnet_service_types                    |
| subnetpoolprefixes                      |
| subnetpools                             |
| subnetroutes                            |
| subnets                                 |
| subports                                |
| tags                                    |
| trunks                                  |
| tz_network_bindings                     |
| vcns_router_bindings                    |
| vips                                    |
| vpnservices                             |
+-----------------------------------------+
162 rows in set (0.00 sec)

MariaDB [neutron]> exit
10. Restart the Compute API service
systemctl restart openstack-nova-api.service
11. Start the Networking services and configure them to start when the system boots.
For both networking options:
systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
12. For networking option 2, also enable and start the layer-3 service:
systemctl enable neutron-l3-agent.service
systemctl start neutron-l3-agent.service
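Once everything is started, each agent should register itself with the neutron server within a minute or so. A quick check with the neutron CLI used elsewhere in this guide (expect the linuxbridge, DHCP, metadata and L3 agents all alive):

. admin-openrc
neutron agent-list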
Installation complete.
XI. Install and configure compute node
Reference: https://docs.openstack.org/newton/install-guide-rdo/neutron-compute-install.html
If all OpenStack services are installed on a single server, this step can be skipped: the /etc/neutron/neutron.conf parameters are identical on the controller node and the compute node, so no further configuration is needed.
# Check that the services are in a healthy state
[root@controller ~]# rpm -qa openstack-neutron-linuxbridge ebtables ipset
ebtables-2.0.10-21.fc25.x86_64
ipset-6.29-1.fc25.x86_64
openstack-neutron-linuxbridge-9.4.0-1.el7.noarch
[root@controller ~]# systemctl status openstack-nova-compute.service
● openstack-nova-compute.service - OpenStack Nova Compute Server
   Loaded: loaded (/usr/lib/systemd/system/openstack-nova-compute.service; enabled; vendor preset: disabled)
   Active: activating (start) since Sat 2017-08-19 14:00:11 CST; 2h 3min ago
 Main PID: 1587 (nova-compute)
    Tasks: 1 (limit: 9830)
   CGroup: /system.slice/openstack-nova-compute.service
           └─1587 /usr/bin/python2 /usr/bin/nova-compute

Aug 19 14:00:11 controller systemd[1]: Starting OpenStack Nova Compute Server...
[root@controller ~]# systemctl status neutron-linuxbridge-agent.service
● neutron-linuxbridge-agent.service - OpenStack Neutron Linux Bridge Agent
   Loaded: loaded (/usr/lib/systemd/system/neutron-linuxbridge-agent.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat 2017-08-19 15:48:21 CST; 16min ago
 Main PID: 3510 (neutron-linuxbr)
    Tasks: 1 (limit: 9830)
   CGroup: /system.slice/neutron-linuxbridge-agent.service
           └─3510 /usr/bin/python2 /usr/bin/neutron-linuxbridge-agent --config-file /usr/share/neutron/neutron-dis

Aug 19 15:48:21 controller systemd[1]: Starting OpenStack Neutron Linux Bridge Agent...
Aug 19 15:48:21 controller neutron-enable-bridge-firewall.sh[3502]: net.bridge.bridge-nf-call-arptables = 1
Aug 19 15:48:21 controller neutron-enable-bridge-firewall.sh[3502]: net.bridge.bridge-nf-call-iptables = 1
Aug 19 15:48:21 controller neutron-enable-bridge-firewall.sh[3502]: net.bridge.bridge-nf-call-ip6tables = 1
Aug 19 15:48:21 controller systemd[1]: Started OpenStack Neutron Linux Bridge Agent.
Aug 19 15:48:21 controller neutron-linuxbridge-agent[3510]: Guru meditation now registers SIGUSR1 and SIGUSR2 by d
Aug 19 15:48:22 controller neutron-linuxbridge-agent[3510]: Option "verbose" from group "DEFAULT" is deprecated fo
Aug 19 15:48:22 controller neutron-linuxbridge-agent[3510]: Option "notification_driver" from group "DEFAULT" is d
Aug 19 15:48:22 controller sudo[3533]: neutron : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/bin/neutron-rootwrap-
lines 1-17/17 (END)
XII. Verify operation
[root@controller ~]# . admin-openrc
[root@controller ~]# neutron ext-list
+---------------------------+-----------------------------------------------+
| alias                     | name                                          |
+---------------------------+-----------------------------------------------+
| default-subnetpools       | Default Subnetpools                           |
| network-ip-availability   | Network IP Availability                       |
| network_availability_zone | Network Availability Zone                     |
| auto-allocated-topology   | Auto Allocated Topology Services              |
| ext-gw-mode               | Neutron L3 Configurable external gateway mode |
| binding                   | Port Binding                                  |
| agent                     | agent                                         |
| subnet_allocation         | Subnet Allocation                             |
| l3_agent_scheduler        | L3 Agent Scheduler                            |
| tag                       | Tag support                                   |
| external-net              | Neutron external network                      |
| flavors                   | Neutron Service Flavors                       |
| net-mtu                   | Network MTU                                   |
| availability_zone         | Availability Zone                             |
| quotas                    | Quota management support                      |
| l3-ha                     | HA Router extension                           |
| provider                  | Provider Network                              |
| multi-provider            | Multi Provider Network                        |
| address-scope             | Address scope                                 |
| extraroute                | Neutron Extra Route                           |
| subnet-service-types      | Subnet service types                          |
| standard-attr-timestamp   | Resource timestamps                           |
| service-type              | Neutron Service Type Management               |
| l3-flavors                | Router Flavor Extension                       |
| port-security             | Port Security                                 |
| extra_dhcp_opt            | Neutron Extra DHCP opts                       |
| standard-attr-revisions   | Resource revision numbers                     |
| pagination                | Pagination support                            |
| sorting                   | Sorting support                               |
| security-group            | security-group                                |
| dhcp_agent_scheduler      | DHCP Agent Scheduler                          |
| router_availability_zone  | Router Availability Zone                      |
| rbac-policies             | RBAC Policies                                 |
| standard-attr-description | standard-attr-description                     |
| router                    | Neutron L3 Router                             |
| allowed-address-pairs     | Allowed Address Pairs                         |
| project-id                | project_id field enabled                      |
| dvr                       | Distributed Virtual Router                    |
+---------------------------+-----------------------------------------------+
XIII. Dashboard
1. Install and configure components
yum install openstack-dashboard -y
2. Configure openstack-dashboard
[root@controller ~]# cp /etc/openstack-dashboard/local_settings /etc/openstack-dashboard/local_settings.old
[root@controller ~]# vi /etc/openstack-dashboard/local_settings
[root@controller ~]# cat /etc/openstack-dashboard/local_settings |grep -v "^#"|grep -v "^$"
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
from openstack_dashboard.settings import HORIZON_CONFIG
DEBUG = False
WEBROOT = '/dashboard/'
ALLOWED_HOSTS = ['*', ]
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
LOCAL_PATH = '/tmp'
SECRET_KEY='1b8a3d57c5bde2cf7506'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    },
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_KEYSTONE_BACKEND = {
    'name': 'native',
    'can_edit_user': True,
    'can_edit_group': True,
    'can_edit_project': True,
    'can_edit_domain': True,
    'can_edit_role': True,
}
OPENSTACK_HYPERVISOR_FEATURES = {
    'can_set_mount_point': False,
    'can_set_password': False,
    'requires_keypair': False,
    'enable_quotas': True
}
OPENSTACK_CINDER_FEATURES = {
    'enable_backup': False,
}
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    'enable_quotas': True,
    'enable_ipv6': True,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': True,
    'enable_firewall': True,
    'enable_vpn': True,
    'enable_fip_topology_check': True,
    # Default dns servers you would like to use when a subnet is
    # created. This is only a default, users can still choose a different
    # list of dns servers when creating a new subnet.
    # The entries below are examples only, and are not appropriate for
    # real deployments
    # 'default_dns_nameservers': ["8.8.8.8", "8.8.4.4", "208.67.222.222"],
    # The profile_support option is used to detect if an external router can be
    # configured via the dashboard. When using specific plugins the
    # profile_support can be turned on if needed.
    'profile_support': None,
    #'profile_support': 'cisco',
    # Set which provider network types are supported. Only the network types
    # in this list will be available to choose from when creating a network.
    # Network types include local, flat, vlan, gre, vxlan and geneve.
    # 'supported_provider_types': ['*'],
    # You can configure available segmentation ID range per network type
    # in your deployment.
    # 'segmentation_id_range': {
    #     'vlan': [1024, 2048],
    #     'vxlan': [4094, 65536],
    # },
    # You can define additional provider network types here.
    # 'extra_provider_types': {
    #     'awesome_type': {
    #         'display_name': 'Awesome New Type',
    #         'require_physical_network': False,
    #         'require_segmentation_id': True,
    #     }
    # },
    # Set which VNIC types are supported for port binding. Only the VNIC
    # types in this list will be available to choose from when creating a
    # port. VNIC types include 'normal', 'macvtap' and 'direct'.
    # Set to empty list or None to disable VNIC type selection.
    'supported_vnic_types': ['*'],
}
OPENSTACK_HEAT_STACK = {
    'enable_user_pass': True,
}
IMAGE_CUSTOM_PROPERTY_TITLES = {
    "architecture": _("Architecture"),
    "kernel_id": _("Kernel ID"),
    "ramdisk_id": _("Ramdisk ID"),
    "image_state": _("Euca2ools state"),
    "project_id": _("Project ID"),
    "image_type": _("Image Type"),
}
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
INSTANCE_LOG_LENGTH = 35
DROPDOWN_MAX_ITEMS = 30
TIME_ZONE = "UTC"
POLICY_FILES_PATH = '/etc/openstack-dashboard'
LOGGING = {
    'version': 1,
    # When set to True this will disable all logging except
    # for loggers specified in this configuration dictionary. Note that
    # if nothing is specified here and disable_existing_loggers is True,
    # django.db.backends will still log unless it is disabled explicitly.
    'disable_existing_loggers': False,
    'formatters': {
        'operation': {
            # The format of "%(message)s" is defined by
            # OPERATION_LOG_OPTIONS['format']
            'format': '%(asctime)s %(message)s'
        },
    },
    'handlers': {
        'null': {'level': 'DEBUG', 'class': 'logging.NullHandler'},
        'console': {
            # Set the level to "DEBUG" for verbose output logging.
            'level': 'INFO',
            'class': 'logging.StreamHandler',
        },
        'operation': {'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'operation'},
    },
    'loggers': {
        # Logging from django.db.backends is VERY verbose, send to null
        # by default.
        'django.db.backends': {'handlers': ['null'], 'propagate': False},
        'requests': {'handlers': ['null'], 'propagate': False},
        'horizon': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'horizon.operation_log': {'handlers': ['operation'], 'level': 'INFO', 'propagate': False},
        'openstack_dashboard': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'novaclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'cinderclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'keystoneclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'glanceclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'neutronclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'heatclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'ceilometerclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'swiftclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'openstack_auth': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'nose.plugins.manager': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'django': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'iso8601': {'handlers': ['null'], 'propagate': False},
        'scss': {'handlers': ['null'], 'propagate': False},
    },
}
SECURITY_GROUP_RULES = {
    'all_tcp': {'name': _('All TCP'), 'ip_protocol': 'tcp', 'from_port': '1', 'to_port': '65535'},
    'all_udp': {'name': _('All UDP'), 'ip_protocol': 'udp', 'from_port': '1', 'to_port': '65535'},
    'all_icmp': {'name': _('All ICMP'), 'ip_protocol': 'icmp', 'from_port': '-1', 'to_port': '-1'},
    'ssh': {'name': 'SSH', 'ip_protocol': 'tcp', 'from_port': '22', 'to_port': '22'},
    'smtp': {'name': 'SMTP', 'ip_protocol': 'tcp', 'from_port': '25', 'to_port': '25'},
    'dns': {'name': 'DNS', 'ip_protocol': 'tcp', 'from_port': '53', 'to_port': '53'},
    'http': {'name': 'HTTP', 'ip_protocol': 'tcp', 'from_port': '80', 'to_port': '80'},
    'pop3': {'name': 'POP3', 'ip_protocol': 'tcp', 'from_port': '110', 'to_port': '110'},
    'imap': {'name': 'IMAP', 'ip_protocol': 'tcp', 'from_port': '143', 'to_port': '143'},
    'ldap': {'name': 'LDAP', 'ip_protocol': 'tcp', 'from_port': '389', 'to_port': '389'},
    'https': {'name': 'HTTPS', 'ip_protocol': 'tcp', 'from_port': '443', 'to_port': '443'},
    'smtps': {'name': 'SMTPS', 'ip_protocol': 'tcp', 'from_port': '465', 'to_port': '465'},
    'imaps': {'name': 'IMAPS', 'ip_protocol': 'tcp', 'from_port': '993', 'to_port': '993'},
    'pop3s': {'name': 'POP3S', 'ip_protocol': 'tcp', 'from_port': '995', 'to_port': '995'},
    'ms_sql': {'name': 'MS SQL', 'ip_protocol': 'tcp', 'from_port': '1433', 'to_port': '1433'},
    'mysql': {'name': 'MYSQL', 'ip_protocol': 'tcp', 'from_port': '3306', 'to_port': '3306'},
    'rdp': {'name': 'RDP', 'ip_protocol': 'tcp', 'from_port': '3389', 'to_port': '3389'},
}
REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
                              'LAUNCH_INSTANCE_DEFAULTS',
                              'OPENSTACK_IMAGE_FORMATS']
ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
3. Restart the services and check their status
[root@controller ~]# systemctl restart httpd.service memcached.service
[root@controller ~]# systemctl status httpd.service memcached.service
4. Verify operation
Configure name resolution on your workstation; on Windows, edit C:\Windows\System32\drivers\etc\hosts (the equivalent of /etc/hosts):
10.0.0.17      controller computer ntpserver
10.0.100.214   controller
# Note: only one controller entry can take effect; use the address your workstation can actually reach (here 10.0.100.214).
Visit the site:
http://controller/dashboard
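If the login page does not come up in the browser, a first check from the controller itself (assuming curl is installed) narrows down whether httpd is serving the dashboard at all:

# Expect HTTP 200, or a 302 redirect to the login page
curl -sI http://controller/dashboard/ | head -1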
XIV. Block Storage service
# Create the Cinder database
1. Create the database
mysql -u root -p
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
  IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
  IDENTIFIED BY '123456';
2. Create the service credentials
. admin-openrc
openstack user create --domain default --password-prompt cinder
User Password:          # enter the password
Repeat User Password:   # repeat the password
openstack role add --project service --user cinder admin
3. Create the service entities and API endpoints
openstack service create --name cinder \
  --description "OpenStack Block Storage" volume
openstack service create --name cinderv2 \
  --description "OpenStack Block Storage" volumev2
openstack endpoint create --region RegionOne \
  volume public http://controller:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
  volume internal http://controller:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
  volume admin http://controller:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
  volumev2 public http://controller:8776/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
  volumev2 internal http://controller:8776/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne \
  volumev2 admin http://controller:8776/v2/%\(tenant_id\)s
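As with the network service earlier, it is worth confirming the endpoints registered before installing packages:

# Each service should list three endpoints (public/internal/admin)
openstack endpoint list --service volume
openstack endpoint list --service volumev2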
4. Install and configure the components
yum install openstack-cinder -y
# Back up the configuration file
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
vi /etc/cinder/cinder.conf
[root@controller ~]# cat /etc/cinder/cinder.conf|grep -v "^#"|grep -v "^$"
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
auth_strategy = keystone
my_ip = 10.0.0.17
[BACKEND]
[BRCD_FABRIC_EXAMPLE]
[CISCO_FABRIC_EXAMPLE]
[COORDINATION]
[FC-ZONE-MANAGER]
[KEY_MANAGER]
[barbican]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://cinder:123456@controller/cinder
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = cinder
password = 123456
[matchmaker_redis]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[ssl]
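Note: the Newton install guide populates the Block Storage database right after this edit; that step is not shown above, so for completeness (command as given in the official guide):

su -s /bin/sh -c "cinder-manage db sync" cinder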
# Configure Compute to use Block Storage (do not do this yet; it will be configured later when we set up Ceph storage)
#Edit the /etc/nova/nova.conf file and add the following to it:
[cinder]
os_region_name = RegionOne
# Restart the Compute API service (not needed at this point):
systemctl restart openstack-nova-api.service
# Enable and start the services
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
XV. Install and configure a storage node
1. Configure this section only if you are using local (LVM) storage.
# lvm2 usually ships with the system:
[root@controller ~]# rpm -qa lvm2
lvm2-2.02.167-3.fc25.x86_64
# Install it if missing
yum install lvm2 -y
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service
2. Create the LVM physical volume /dev/sdb:
pvcreate /dev/sdb
# Result
Physical volume "/dev/sdb" successfully created
3. Create the LVM volume group cinder-volumes (the Block Storage service creates logical volumes in this group):
vgcreate cinder-volumes /dev/sdb
# Result
Volume group "cinder-volumes" successfully created
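To double-check the group before handing it to cinder (standard LVM tooling):

# Show the new volume group and the physical volume backing it
vgs cinder-volumes
pvdisplay /dev/sdb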
4. Only instances may access Block Storage volumes, yet the underlying operating system manages the devices associated with them. By default, the LVM volume scanning tool scans the /dev directory for block storage devices that contain volumes. If projects use LVM on their volumes, the scanning tool detects those volumes and attempts to cache them, which can cause a variety of problems for both the underlying operating system and the project volumes. You must reconfigure LVM to scan only the devices that contain the cinder-volumes volume group. Edit the /etc/lvm/lvm.conf file and complete the following actions:
devices {
    # Storage node: accept only /dev/sdb (the cinder-volumes PV), reject everything else
    filter = [ "a/sdb/", "r/.*/" ]
    # If the operating system disk (/dev/sda) also uses LVM, accept it too:
    # filter = [ "a/sda/", "a/sdb/", "r/.*/" ]
    # On compute nodes whose OS disk uses LVM, only the OS disk is needed:
    # filter = [ "a/sda/", "r/.*/" ]
}
XVI. Install and configure components (proceed directly with this step; everything is installed on the controller node.)
1. Install the packages
yum install openstack-cinder targetcli python-keystone
2. vi /etc/cinder/cinder.conf
While editing the configuration file above I hit a gotcha: the default cinder.conf has no [lvm] section, so you must add it yourself.
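For reference, the Newton install guide adds the backend section like the following; append it to /etc/cinder/cinder.conf and reference it from [DEFAULT] (adjust the volume group name if yours differs):

[DEFAULT]
# ...existing options...
enabled_backends = lvm
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm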