之前的文章介紹了LVS負載均衡-基礎知識梳理, 下面記錄下LVS+Keepalived高可用環境部署梳理(主主和主從模式)的操作流程:
一、LVS+Keepalived主從熱備的高可用環境部署
1)環境准備
LVS_Keepalived_Master 182.148.15.237 LVS_Keepalived_Backup 182.148.15.236 Real_Server1 182.148.15.233 Real_Server2 182.148.15.238 VIP 182.148.15.239 系統版本都是centos6.8 特別注意: Director Server與Real Server必須有一塊網卡連在同一物理網段上!否則lvs會轉發失敗! 在遠程telnet vip port會報錯: "telnet: connect to address *.*.*.*: No route to host"
基本的網絡拓撲圖如下:

2)LVS_keepalived_Master和LVS_keepalived_Backup兩台服務器上安裝配置LVS和keepalived的操作記錄:
1)關閉 SElinux、配置防火牆(在LVS_Keepalived_Master 和 LVS_Keepalived_Backup兩台機器上都要操作)
[root@LVS_Keepalived_Master ~]# vim /etc/sysconfig/selinux
#SELINUX=enforcing #注釋掉
#SELINUXTYPE=targeted #注釋掉
SELINUX=disabled #增加
[root@LVS_Keepalived_Master ~]# setenforce 0 #臨時關閉selinux。上面文件配置后,重啟機器后就永久生效。
注意下面182.148.15.0/24是服務器的公網網段,192.168.1.0/24是服務器的私網網段
一定要注意:加上這個組播規則后,MASTER和BACKUP故障時,才能實現VIP資源的正常轉移。其故障恢復后,VIP也還會正常轉移回來。
[root@LVS_Keepalived_Master ~]# vim /etc/sysconfig/iptables
.......
-A INPUT -s 182.148.15.0/24 -d 224.0.0.18 -j ACCEPT #允許組播地址通信。
-A INPUT -s 192.168.1.0/24 -d 224.0.0.18 -j ACCEPT
-A INPUT -s 182.148.15.0/24 -p vrrp -j ACCEPT #允許 VRRP(虛擬路由器冗余協議)通信
-A INPUT -s 192.168.1.0/24 -p vrrp -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT
[root@LVS_Keepalived_Master ~]# /etc/init.d/iptables restart
----------------------------------------------------------------------------------------------------------------------
2)LVS安裝(在LVS_Keepalived_Master 和 LVS_Keepalived_Backup兩台機器上都要操作)
需要安裝以下軟件包
[root@LVS_Keepalived_Master ~]# yum install -y libnl* popt*
查看是否加載lvs模塊
[root@LVS_Keepalived_Master src]# modprobe -l |grep ipvs
下載並安裝LVS
[root@LVS_Keepalived_Master ~]# cd /usr/local/src/
[root@LVS_Keepalived_Master src]# wget http://www.linuxvirtualserver.org/software/kernel-2.6/ipvsadm-1.26.tar.gz
解壓安裝
[root@LVS_Keepalived_Master src]# ln -s /usr/src/kernels/2.6.32-431.5.1.el6.x86_64/ /usr/src/linux
[root@LVS_Keepalived_Master src]# tar -zxvf ipvsadm-1.26.tar.gz
[root@LVS_Keepalived_Master src]# cd ipvsadm-1.26
[root@LVS_Keepalived_Master ipvsadm-1.26]# make && make install
LVS安裝完成,查看當前LVS集群
[root@LVS_Keepalived_Master ipvsadm-1.26]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
----------------------------------------------------------------------------------------------------------------------
3)編寫LVS啟動腳本/etc/init.d/realserver(在Real_Server1 和Real_Server2上都要操作,realserver腳本內容是一樣的)
[root@Real_Server1 ~]# vim /etc/init.d/realserver
#!/bin/sh
# LVS-DR real-server helper: binds the VIP to loopback alias lo:0 and
# suppresses ARP for it (arp_ignore=1 / arp_announce=2) so the director,
# not the real server, answers ARP requests for the VIP.
VIP=182.148.15.239
. /etc/rc.d/init.d/functions
case "$1" in
# Disable local ARP replies for the VIP, then bind it to the loopback alias
start)
/sbin/ifconfig lo down
/sbin/ifconfig lo up
echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
/sbin/sysctl -p >/dev/null 2>&1
# /32 netmask keeps the VIP host-local so it is reachable by the
# Director's DR forwarding without being advertised on the segment
/sbin/ifconfig lo:0 "$VIP" netmask 255.255.255.255 up
/sbin/route add -host "$VIP" dev lo:0
echo "LVS-DR real server starts successfully."
;;
stop)
/sbin/ifconfig lo:0 down
/sbin/route del "$VIP" >/dev/null 2>&1
# Restore kernel ARP defaults (the original script wrongly re-applied
# the suppression values 1/2 here, leaving ARP disabled after stop)
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
echo "LVS-DR real server stopped."
;;
status)
isLoOn=`/sbin/ifconfig lo:0 | grep "$VIP"`
isRoOn=`/bin/netstat -rn | grep "$VIP"`
# Fixed variable-name typo ($isLoON) that made this test always compare
# an empty string; '=' instead of '==' for POSIX sh portability.
if [ "$isLoOn" = "" -a "$isRoOn" = "" ]; then
echo "LVS-DR real server is not running."
exit 3 # LSB convention: 3 = program is not running
else
echo "LVS-DR real server is running."
exit 0
fi
;;
*)
echo "Usage: $0 {start|stop|status}"
exit 1
esac
exit 0
將lvs腳本加入開機自啟動
[root@Real_Server1 ~]# chmod +x /etc/init.d/realserver
[root@Real_Server1 ~]# echo "/etc/init.d/realserver start" >> /etc/rc.d/rc.local
啟動LVS腳本(注意:如果這兩台realserver機器重啟了,一定要確保service realserver start 啟動了,即lo:0本地回環上綁定了vip地址,否則lvs轉發失敗!)
[root@Real_Server1 ~]# service realserver start
LVS-DR real server starts successfully.\n
查看Real_Server1服務器,發現VIP已經成功綁定到本地回環口lo上了
[root@Real_Server1 ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 52:54:00:D1:27:75
inet addr:182.148.15.233 Bcast:182.148.15.255 Mask:255.255.255.224
inet6 addr: fe80::5054:ff:fed1:2775/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:309741 errors:0 dropped:0 overruns:0 frame:0
TX packets:27993954 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:37897512 (36.1 MiB) TX bytes:23438654329 (21.8 GiB)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:0 (0.0 b) TX bytes:0 (0.0 b)
lo:0 Link encap:Local Loopback
inet addr:182.148.15.239 Mask:255.255.255.255
UP LOOPBACK RUNNING MTU:65536 Metric:1
----------------------------------------------------------------------------------------------------------------------
4)安裝Keepalived(LVS_Keepalived_Master 和 LVS_Keepalived_Backup兩台機器都要操作)
[root@LVS_Keepalived_Master ~]# yum install -y openssl-devel
[root@LVS_Keepalived_Master ~]# cd /usr/local/src/
[root@LVS_Keepalived_Master src]# wget http://www.keepalived.org/software/keepalived-1.3.5.tar.gz
[root@LVS_Keepalived_Master src]# tar -zvxf keepalived-1.3.5.tar.gz
[root@LVS_Keepalived_Master src]# cd keepalived-1.3.5
[root@LVS_Keepalived_Master keepalived-1.3.5]# ./configure --prefix=/usr/local/keepalived
[root@LVS_Keepalived_Master keepalived-1.3.5]# make && make install
[root@LVS_Keepalived_Master keepalived-1.3.5]# cp /usr/local/src/keepalived-1.3.5/keepalived/etc/init.d/keepalived /etc/rc.d/init.d/
[root@LVS_Keepalived_Master keepalived-1.3.5]# cp /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
[root@LVS_Keepalived_Master keepalived-1.3.5]# mkdir /etc/keepalived/
[root@LVS_Keepalived_Master keepalived-1.3.5]# cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
[root@LVS_Keepalived_Master keepalived-1.3.5]# cp /usr/local/keepalived/sbin/keepalived /usr/sbin/
[root@LVS_Keepalived_Master keepalived-1.3.5]# echo "/etc/init.d/keepalived start" >> /etc/rc.local
[root@LVS_Keepalived_Master keepalived-1.3.5]# chmod +x /etc/rc.d/init.d/keepalived #添加執行權限
[root@LVS_Keepalived_Master keepalived-1.3.5]# chkconfig keepalived on #設置開機啟動
[root@LVS_Keepalived_Master keepalived-1.3.5]# service keepalived start #啟動
[root@LVS_Keepalived_Master keepalived-1.3.5]# service keepalived stop #關閉
[root@LVS_Keepalived_Master keepalived-1.3.5]# service keepalived restart #重啟
----------------------------------------------------------------------------------------------------------------------
5)接着配置LVS+Keepalived配置
現在LVS_Keepalived_Master和LVS_Keepalived_Backup兩台機器上打開ip_forward轉發功能
[root@LVS_Keepalived_Master ~]# echo "1" > /proc/sys/net/ipv4/ip_forward
LVS_Keepalived_Master機器上的keepalived.conf配置:
[root@LVS_Keepalived_Master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_Master
}
vrrp_instance VI_1 {
state MASTER #initial state of this instance; the actual role is decided by priority — differs on the backup node
interface eth0 #NIC that carries the virtual IP
virtual_router_id 51 #VRID: instances sharing a VRID form one group and share the multicast MAC address
priority 100 #priority; set to 90 on the other node — differs on the backup node
advert_int 1 #VRRP advertisement interval (seconds)
authentication {
auth_type PASS #authentication type: PASS or AH
auth_pass 1111 #authentication password
}
virtual_ipaddress {
182.148.15.239 #VIP
}
}
virtual_server 182.148.15.239 80 {
delay_loop 6 #health-check polling interval (seconds)
lb_algo wrr #weighted round-robin; LVS scheduling algorithms: rr|wrr|lc|wlc|lblc|sh|dh
lb_kind DR #LVS forwarding mode NAT|DR|TUN; DR requires the director and real servers to share a physical network segment
#nat_mask 255.255.255.0
persistence_timeout 50 #session persistence time (seconds)
protocol TCP #health-check protocol
## Real Server definitions; 80 is the service port
real_server 182.148.15.233 80 {
weight 3 ##weight
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 182.148.15.238 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
啟動keepalived
[root@LVS_Keepalived_Master ~]# /etc/init.d/keepalived start
Starting keepalived: [ OK ]
[root@LVS_Keepalived_Master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:68:dc:b6 brd ff:ff:ff:ff:ff:ff
inet 182.148.15.237/27 brd 182.148.15.255 scope global eth0
inet 182.148.15.239/32 scope global eth0
inet6 fe80::5054:ff:fe68:dcb6/64 scope link
valid_lft forever preferred_lft forever
注意此時網卡的變化,可以看到VIP(182.148.15.239)已經綁定到LVS_Keepalived_Master的eth0網卡上了。
此時查看LVS集群狀態,可以看到集群下有兩個Real Server,調度算法,權重等信息。ActiveConn代表當前Real Server的活躍連接數。
[root@LVS_Keepalived_Master ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 182.148.15.239:80 wrr persistent 50
-> 182.148.15.233:80 Route 3 0 0
-> 182.148.15.238:80 Route 3 0 0
-------------------------------------------------------------------------
LVS_Keepalived_Backup機器上的keepalived.conf配置:
[root@LVS_Keepalived_Backup ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_Backup #unique router ID for this node
}
vrrp_instance VI_1 {
state BACKUP #standby node; the actual role is decided by priority
interface eth0 #NIC that carries the virtual IP
virtual_router_id 51 #must match the MASTER's VRID to join the same VRRP group
priority 90 #lower than the MASTER's 100, so this node stays standby
advert_int 1 #VRRP advertisement interval (seconds)
authentication {
auth_type PASS #must match the MASTER's authentication settings
auth_pass 1111
}
virtual_ipaddress {
182.148.15.239 #VIP (same as on the MASTER)
}
}
virtual_server 182.148.15.239 80 {
delay_loop 6 #health-check polling interval (seconds)
lb_algo wrr #weighted round-robin scheduling
lb_kind DR #direct-routing forwarding mode
persistence_timeout 50 #session persistence time (seconds)
protocol TCP #health-check protocol
real_server 182.148.15.233 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 182.148.15.238 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
[root@LVS_Keepalived_Backup ~]# /etc/init.d/keepalived start
Starting keepalived: [ OK ]
查看LVS_Keepalived_Backup機器,發現VIP默認在LVS_Keepalived_Master機器上,只有當LVS_Keepalived_Master發生故障時,VIP資源才會飄到LVS_Keepalived_Backup這邊來。
[root@LVS_Keepalived_Backup ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:7c:b8:f0 brd ff:ff:ff:ff:ff:ff
inet 182.148.15.236/27 brd 182.148.15.255 scope global eth0
inet6 fe80::5054:ff:fe7c:b8f0/64 scope link
inet6 fe80::5054:ff:fe7c:b8f0/64 scope link
valid_lft forever preferred_lft forever
[root@LVS_Keepalived_Backup ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 182.148.15.239:80 wrr persistent 50
-> 182.148.15.233:80 Route 3 0 0
-> 182.148.15.238:80 Route 3 0 0
3)后端兩台Real Server上的操作
在兩台Real Server上配置好nginx,nginx安裝配置過程省略。 分別在兩台Real Server上配置兩個域名www.wangshibo.com和www.guohuihui.com。 在LVS_Keepalived_Master 和 LVS_Keepalived_Backup兩台機器上要能正常訪問這兩個域名 [root@LVS_Keepalived_Master ~]# curl http://www.wangshibo.com this is page of Real_Server1:182.148.15.238 www.wangshibo.com [root@LVS_Keepalived_Master ~]# curl http://www.guohuihui.com this is page of Real_Server2:182.148.15.238 www.guohuihui.com [root@LVS_Keepalived_Backup ~]# curl http://www.wangshibo.com this is page of Real_Server1:182.148.15.238 www.wangshibo.com [root@LVS_Keepalived_Backup ~]# curl http://www.guohuihui.com this is page of Real_Server2:182.148.15.238 www.guohuihui.com 關閉182.148.15.238這台機器(即Real_Server2)的nginx,發現對應域名的請求就會到Real_Server1上 [root@Real_Server2 ~]# /usr/local/nginx/sbin/nginx -s stop [root@Real_Server2 ~]# lsof -i:80 [root@Real_Server2 ~]# 再次在LVS_Keepalived_Master 和 LVS_Keepalived_Backup兩台機器上訪問這兩個域名,就會發現已經負載到Real_Server1上了 [root@LVS_Keepalived_Master ~]# curl http://www.wangshibo.com this is page of Real_Server1:182.148.15.233 www.wangshibo.com [root@LVS_Keepalived_Master ~]# curl http://www.guohuihui.com this is page of Real_Server1:182.148.15.233 www.guohuihui.com [root@LVS_Keepalived_Backup ~]# curl http://www.wangshibo.com this is page of Real_Server1:182.148.15.233 www.wangshibo.com [root@LVS_Keepalived_Backup ~]# curl http://www.guohuihui.com this is page of Real_Server1:182.148.15.233 www.guohuihui.com 另外,設置這兩台Real Server的iptables,讓其80端口只對前面的兩個vip資源開放 [root@Real_Server1 ~]# vim /etc/sysconfig/iptables ...... -A INPUT -s 182.148.15.239 -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT [root@Real_Server1 ~]# /etc/init.d/iptables restart
4)測試
將www.wangshibo.com和www.guohuihui.com測試域名解析到VIP:182.148.15.239,然后在瀏覽器里是可以正常訪問的。


1)測試LVS功能(上面Keepalived的lvs配置中,自帶了健康檢查,當后端服務器的故障出現故障后會自動從lvs集群中踢出,當故障恢復后,再自動加入到集群中)
先查看當前LVS集群,如下:發現后端兩台Real Server的80端口都運行正常
[root@LVS_Keepalived_Master ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 182.148.15.239:80 wrr persistent 50
-> 182.148.15.233:80 Route 3 0 0
-> 182.148.15.238:80 Route 3 0 0
現在測試關閉一台Real Server,比如Real_Server2
[root@Real_Server2 ~]# /usr/local/nginx/sbin/nginx -s stop
過一會兒再次查看當前LVS集群,如下:發現Real_Server2已經被踢出當前LVS集群了
[root@LVS_Keepalived_Master ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 182.148.15.239:80 wrr persistent 50
-> 182.148.15.233:80 Route 3 0 0
最后重啟Real_Server2的80端口,發現LVS集群里又再次將其添加進來了
[root@Real_Server2 ~]# /usr/local/nginx/sbin/nginx
[root@LVS_Keepalived_Master ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 182.148.15.239:80 wrr persistent 50
-> 182.148.15.233:80 Route 3 0 0
-> 182.148.15.238:80 Route 3 0 0
以上測試中,http://www.wangshibo.com和http://www.guohuihui.com域名訪問都不受影響。
2)測試Keepalived心跳測試的高可用
默認情況下,VIP資源是在LVS_Keepalived_Master上
[root@LVS_Keepalived_Master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:68:dc:b6 brd ff:ff:ff:ff:ff:ff
inet 182.148.15.237/27 brd 182.148.15.255 scope global eth0
inet 182.148.15.239/32 scope global eth0
inet 182.148.15.239/27 brd 182.148.15.255 scope global secondary eth0:0
inet6 fe80::5054:ff:fe68:dcb6/64 scope link
valid_lft forever preferred_lft forever
然后關閉LVS_Keepalived_Master的keepalived,發現VIP就會轉移到LVS_Keepalived_Backup上。
[root@LVS_Keepalived_Master ~]# /etc/init.d/keepalived stop
Stopping keepalived: [ OK ]
[root@LVS_Keepalived_Master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:68:dc:b6 brd ff:ff:ff:ff:ff:ff
inet 182.148.15.237/27 brd 182.148.15.255 scope global eth0
inet 182.148.15.239/27 brd 182.148.15.255 scope global secondary eth0:0
inet6 fe80::5054:ff:fe68:dcb6/64 scope link
valid_lft forever preferred_lft forever
查看系統日志,能查看到LVS_Keepalived_Master的VIP的移動信息
[root@LVS_Keepalived_Master ~]# tail -f /var/log/messages
.............
May 8 10:19:36 Haproxy_Keepalived_Master Keepalived_healthcheckers[20875]: TCP connection to [182.148.15.233]:80 failed.
May 8 10:19:39 Haproxy_Keepalived_Master Keepalived_healthcheckers[20875]: TCP connection to [182.148.15.233]:80 failed.
May 8 10:19:39 Haproxy_Keepalived_Master Keepalived_healthcheckers[20875]: Check on service [182.148.15.233]:80 failed after 1 retry.
May 8 10:19:39 Haproxy_Keepalived_Master Keepalived_healthcheckers[20875]: Removing service [182.148.15.233]:80 from VS [182.148.15.239]:80
[root@LVS_Keepalived_Backup ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:7c:b8:f0 brd ff:ff:ff:ff:ff:ff
inet 182.148.15.236/27 brd 182.148.15.255 scope global eth0
inet 182.148.15.239/32 scope global eth0
inet 182.148.15.239/27 brd 182.148.15.255 scope global secondary eth0:0
inet6 fe80::5054:ff:fe7c:b8f0/64 scope link
valid_lft forever preferred_lft forever
接着再重新啟動LVS_Keepalived_Master的keepalived,發現VIP又轉移回來了
[root@LVS_Keepalived_Master ~]# /etc/init.d/keepalived start
Starting keepalived: [ OK ]
[root@LVS_Keepalived_Master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:68:dc:b6 brd ff:ff:ff:ff:ff:ff
inet 182.148.15.237/27 brd 182.148.15.255 scope global eth0
inet 182.148.15.239/32 scope global eth0
inet 182.148.15.239/27 brd 182.148.15.255 scope global secondary eth0:0
inet6 fe80::5054:ff:fe68:dcb6/64 scope link
valid_lft forever preferred_lft forever
查看系統日志,能查看到LVS_Keepalived_Master的VIP轉移回來的信息
[root@LVS_Keepalived_Master ~]# tail -f /var/log/messages
.............
May 8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
May 8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: VRRP_Instance(VI_1) Sending/queueing gratuitous ARPs on eth0 for 182.148.15.239
May 8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
May 8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
May 8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
May 8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
二、LVS+Keepalived主主熱備的高可用環境部署
主主環境相比於主從環境,區別只在於:
1)LVS負載均衡層需要兩個VIP。比如182.148.15.239和182.148.15.235
2)后端的realserver上要綁定這兩個VIP到lo本地回環口上
3)Keepalived.conf的配置相比於上面的主從模式也有所不同
主主架構的具體配置如下:
1)編寫LVS啟動腳本(在Real_Server1 和Real_Server2上都要操作,realserver腳本內容是一樣的)
由於后端realserver機器要綁定兩個VIP到本地回環口lo上(分別綁定到lo:0和lo:1),所以需要編寫兩個啟動腳本
[root@Real_Server1 ~]# vim /etc/init.d/realserver1
#!/bin/sh
# LVS-DR real-server helper for the first VIP: binds it to loopback alias
# lo:0 and suppresses ARP for it so only the director answers ARP requests.
VIP=182.148.15.239
. /etc/rc.d/init.d/functions
case "$1" in
start)
/sbin/ifconfig lo down
/sbin/ifconfig lo up
echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
/sbin/sysctl -p >/dev/null 2>&1
# /32 netmask keeps the VIP host-local, not advertised on the segment
/sbin/ifconfig lo:0 "$VIP" netmask 255.255.255.255 up
/sbin/route add -host "$VIP" dev lo:0
echo "LVS-DR real server starts successfully."
;;
stop)
/sbin/ifconfig lo:0 down
/sbin/route del "$VIP" >/dev/null 2>&1
# Restore kernel ARP defaults (original wrongly re-applied 1/2 here)
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
echo "LVS-DR real server stopped."
;;
status)
isLoOn=`/sbin/ifconfig lo:0 | grep "$VIP"`
isRoOn=`/bin/netstat -rn | grep "$VIP"`
# Fixed variable-name typo ($isLoON); '=' for POSIX sh portability
if [ "$isLoOn" = "" -a "$isRoOn" = "" ]; then
echo "LVS-DR real server is not running."
exit 3 # LSB convention: 3 = program is not running
else
echo "LVS-DR real server is running."
exit 0
fi
;;
*)
echo "Usage: $0 {start|stop|status}"
exit 1
esac
exit 0
[root@Real_Server1 ~]# vim /etc/init.d/realserver2
#!/bin/sh
# LVS-DR real-server helper for the second VIP: binds it to loopback alias
# lo:1 and suppresses ARP for it so only the director answers ARP requests.
VIP=182.148.15.235
. /etc/rc.d/init.d/functions
case "$1" in
start)
/sbin/ifconfig lo down
/sbin/ifconfig lo up
echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
/sbin/sysctl -p >/dev/null 2>&1
# /32 netmask keeps the VIP host-local, not advertised on the segment
/sbin/ifconfig lo:1 "$VIP" netmask 255.255.255.255 up
/sbin/route add -host "$VIP" dev lo:1
echo "LVS-DR real server starts successfully."
;;
stop)
/sbin/ifconfig lo:1 down
/sbin/route del "$VIP" >/dev/null 2>&1
# Restore kernel ARP defaults (original wrongly re-applied 1/2 here)
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
echo "LVS-DR real server stopped."
;;
status)
isLoOn=`/sbin/ifconfig lo:1 | grep "$VIP"`
isRoOn=`/bin/netstat -rn | grep "$VIP"`
# Fixed variable-name typo ($isLoON); '=' for POSIX sh portability
if [ "$isLoOn" = "" -a "$isRoOn" = "" ]; then
echo "LVS-DR real server is not running."
exit 3 # LSB convention: 3 = program is not running
else
echo "LVS-DR real server is running."
exit 0
fi
;;
*)
echo "Usage: $0 {start|stop|status}"
exit 1
esac
exit 0
將lvs腳本加入開機自啟動
[root@Real_Server1 ~]# chmod +x /etc/init.d/realserver1
[root@Real_Server1 ~]# chmod +x /etc/init.d/realserver2
[root@Real_Server1 ~]# echo "/etc/init.d/realserver1" >> /etc/rc.d/rc.local
[root@Real_Server1 ~]# echo "/etc/init.d/realserver2" >> /etc/rc.d/rc.local
啟動LVS腳本
[root@Real_Server1 ~]# service realserver1 start
LVS-DR real server starts successfully.\n
[root@Real_Server1 ~]# service realserver2 start
LVS-DR real server starts successfully.\n
查看Real_Server1服務器,發現VIP已經成功綁定到本地回環口lo上了
[root@Real_Server1 ~]# ifconfig
eth0 Link encap:Ethernet HWaddr 52:54:00:D1:27:75
inet addr:182.148.15.233 Bcast:182.148.15.255 Mask:255.255.255.224
inet6 addr: fe80::5054:ff:fed1:2775/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:309741 errors:0 dropped:0 overruns:0 frame:0
TX packets:27993954 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:37897512 (36.1 MiB) TX bytes:23438654329 (21.8 GiB)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:65536 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:0 (0.0 b) TX bytes:0 (0.0 b)
lo:0 Link encap:Local Loopback
inet addr:182.148.15.239 Mask:255.255.255.255
UP LOOPBACK RUNNING MTU:65536 Metric:1
lo:1 Link encap:Local Loopback
inet addr:182.148.15.235 Mask:255.255.255.255
UP LOOPBACK RUNNING MTU:65536 Metric:1
2)Keepalived.conf的配置
LVS_Keepalived_Master機器上的Keepalived.conf配置
先打開ip_forward路由轉發功能
[root@LVS_Keepalived_Master ~]# echo "1" > /proc/sys/net/ipv4/ip_forward
[root@LVS_Keepalived_Master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! Active-active setup: this node is MASTER for VIP .239 and BACKUP for VIP .235.
global_defs {
router_id LVS_Master #unique router ID for this node
}
! VRRP instance 1: owns VIP 182.148.15.239 (priority 100 beats the peer's 90)
vrrp_instance VI_1 {
state MASTER
interface eth0 #NIC that carries the VIP
virtual_router_id 51 #must match the peer's VRID for this group
priority 100
advert_int 1 #VRRP advertisement interval (seconds)
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
182.148.15.239
}
}
! VRRP instance 2: standby for VIP 182.148.15.235 (the peer holds it with priority 100)
vrrp_instance VI_2 {
state BACKUP
interface eth0
virtual_router_id 52 #different VRID — a separate VRRP group
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
182.148.15.235
}
}
! LVS virtual service on VIP .239 — weighted round-robin, direct routing
virtual_server 182.148.15.239 80 {
delay_loop 6 #health-check polling interval (seconds)
lb_algo wrr #weighted round-robin scheduling
lb_kind DR #direct-routing forwarding mode
#nat_mask 255.255.255.0
persistence_timeout 50 #session persistence time (seconds)
protocol TCP #health-check protocol
real_server 182.148.15.233 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 182.148.15.238 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
! LVS virtual service on VIP .235 — the same real servers sit behind both VIPs
virtual_server 182.148.15.235 80 {
delay_loop 6
lb_algo wrr
lb_kind DR
#nat_mask 255.255.255.0
persistence_timeout 50
protocol TCP
real_server 182.148.15.233 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 182.148.15.238 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
LVS_Keepalived_Backup機器上的Keepalived.conf配置
[root@LVS_Keepalived_Backup ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! Active-active setup: this node is BACKUP for VIP .239 and MASTER for VIP .235.
global_defs {
router_id LVS_Backup #unique router ID for this node
}
! VRRP instance 1: standby for VIP 182.148.15.239 (peer holds it with priority 100)
vrrp_instance VI_1 {
state BACKUP #fixed: keepalived expects the uppercase keyword BACKUP, not "Backup"
interface eth0
virtual_router_id 51 #must match the peer's VRID for this group
priority 90
advert_int 1 #VRRP advertisement interval (seconds)
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
182.148.15.239
}
}
! VRRP instance 2: owns VIP 182.148.15.235 (priority 100 beats the peer's 90)
vrrp_instance VI_2 {
state MASTER #fixed: keepalived expects the uppercase keyword MASTER, not "Master"
interface eth0
virtual_router_id 52 #different VRID — a separate VRRP group
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
182.148.15.235
}
}
! LVS virtual service on VIP .239 — weighted round-robin, direct routing
virtual_server 182.148.15.239 80 {
delay_loop 6 #health-check polling interval (seconds)
lb_algo wrr #weighted round-robin scheduling
lb_kind DR #direct-routing forwarding mode
#nat_mask 255.255.255.0
persistence_timeout 50 #session persistence time (seconds)
protocol TCP #health-check protocol
real_server 182.148.15.233 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 182.148.15.238 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
! LVS virtual service on VIP .235 — the same real servers sit behind both VIPs
virtual_server 182.148.15.235 80 {
delay_loop 6
lb_algo wrr
lb_kind DR
#nat_mask 255.255.255.0
persistence_timeout 50
protocol TCP
real_server 182.148.15.233 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
real_server 182.148.15.238 80 {
weight 3
TCP_CHECK {
connect_timeout 3
nb_get_retry 3
delay_before_retry 3
connect_port 80
}
}
}
其他驗證操作和上面主從模式一樣~~~
