LVS + Keepalived Master/Backup and Master/Master Architecture


Configuring the master/backup setup

1) Disable SELinux and configure the firewall
vi /etc/sysconfig/selinux
SELINUX=disabled

setenforce 0            # temporarily disable SELinux; the change in the config file takes effect after a reboot

182.148.15.0/24 is the servers' public network segment and 192.168.1.0/24 is the servers' private network segment.
Important: only with the multicast rules below in place can the VIP resource fail over correctly when the MASTER or BACKUP fails, and fail back correctly once the failed node recovers.
vim /etc/sysconfig/iptables
-A INPUT -s 182.148.15.0/24 -d 224.0.0.18 -j ACCEPT      # allow traffic to the VRRP multicast address
-A INPUT -s 192.168.1.0/24 -d 224.0.0.18 -j ACCEPT
-A INPUT -s 182.148.15.0/24 -p vrrp -j ACCEPT            # allow VRRP (Virtual Router Redundancy Protocol) traffic
-A INPUT -s 192.168.1.0/24 -p vrrp -j ACCEPT
-A INPUT -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT

/etc/init.d/iptables restart
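A quick way to confirm the rules are actually loaded (a minimal check; the exact listing format depends on your iptables version):
iptables -nL INPUT | grep 224.0.0.18        # the multicast rules
iptables -nL INPUT | grep -E 'vrrp|112'     # the VRRP protocol rules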

 

2) Install LVS (ipvsadm) (on both the master and the backup director)

 yum install -y libnl* popt*
 modprobe -l |grep ipvs
 cd /usr/local/src/
wget http://www.linuxvirtualserver.org/software/kernel-2.6/ipvsadm-1.26.tar.gz
ln -s /usr/src/kernels/2.6.32-431.5.1.el6.x86_64/ /usr/src/linux
tar -zxvf ipvsadm-1.26.tar.gz
cd ipvsadm-1.26
make && make install
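Note: the kernel source path used for the /usr/src/linux symlink above is machine-specific. A small check before creating the link (assuming the kernel-devel package matching the running kernel is installed):
uname -r                     # running kernel version
ls /usr/src/kernels/         # installed kernel header directories; link the one matching uname -r
yum install -y kernel-devel  # install the headers first if the directory is missing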

Check the LVS virtual server table:
ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn

3) Write the LVS real-server startup script (on both real servers)

vim /etc/init.d/realserver
#!/bin/sh
VIP=182.148.15.239
. /etc/rc.d/init.d/functions
    
case "$1" in
# Suppress ARP responses for the VIP and bind it to the local loopback interface
start)
    /sbin/ifconfig lo down
    /sbin/ifconfig lo up
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    /sbin/sysctl -p >/dev/null 2>&1
    /sbin/ifconfig lo:0 $VIP netmask 255.255.255.255 up     # bind the VIP to the loopback alias with a host mask (/32)
    /sbin/route add -host $VIP dev lo:0
    echo "LVS-DR real server starts successfully.\n"
    ;;
stop)
    /sbin/ifconfig lo:0 down
    /sbin/route del $VIP >/dev/null 2>&1
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
echo "LVS-DR real server stopped.\n"
    ;;
status)
    isLoOn=`/sbin/ifconfig lo:0 | grep "$VIP"`
    isRoOn=`/bin/netstat -rn | grep "$VIP"`
    if [ "$isLoON" == "" -a "$isRoOn" == "" ]; then
        echo "LVS-DR real server has run yet."
    else
        echo "LVS-DR real server is running."
    fi
    exit 3
    ;;
*)
    echo "Usage: $0 {start|stop|status}"
    exit 1
esac
exit 0
Add the LVS script to system startup:
chmod +x /etc/init.d/realserver
echo "/etc/init.d/realserver start" >> /etc/rc.d/rc.local

Start the LVS script (note: if either real server machine is rebooted, make sure "service realserver start" has been run, i.e. that the VIP is bound to the lo:0 loopback alias; otherwise LVS forwarding will fail!):
service realserver start
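Optionally, the ARP suppression settings can also be written to /etc/sysctl.conf so that a reboot does not leave the real server answering ARP for the VIP before the script runs (a minimal sketch, assuming the default sysctl.conf is in use):
cat >> /etc/sysctl.conf <<'EOF'
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
EOF
sysctl -p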

Check Real_Server1; the VIP has been bound successfully to the local loopback interface lo:
ifconfig
eth0      Link encap:Ethernet  HWaddr 52:54:00:D1:27:75
          inet addr:182.148.15.233  Bcast:182.148.15.255  Mask:255.255.255.224
          inet6 addr: fe80::5054:ff:fed1:2775/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:309741 errors:0 dropped:0 overruns:0 frame:0
          TX packets:27993954 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:37897512 (36.1 MiB)  TX bytes:23438654329 (21.8 GiB)
   
lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 b)  TX bytes:0 (0.0 b)
   
lo:0      Link encap:Local Loopback
          inet addr:182.148.15.239  Mask:255.255.255.255
          UP LOOPBACK RUNNING  MTU:65536  Metric:1

 

4) Install Keepalived (on both master and backup)

 yum install -y openssl-devel
cd /usr/local/src/
wget http://www.keepalived.org/software/keepalived-1.3.5.tar.gz
tar -zvxf keepalived-1.3.5.tar.gz
cd keepalived-1.3.5
./configure --prefix=/usr/local/keepalived
make && make install

cp /usr/local/src/keepalived-1.3.5/keepalived/etc/init.d/keepalived /etc/rc.d/init.d/
cp /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
mkdir /etc/keepalived/
cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
cp /usr/local/keepalived/sbin/keepalived /usr/sbin/
echo "/etc/init.d/keepalived start" >> /etc/rc.local

chmod +x /etc/rc.d/init.d/keepalived
chkconfig keepalived on
service keepalived start
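A quick check that the installation and startup worked (output varies slightly by version):
/usr/sbin/keepalived -v        # should report Keepalived v1.3.5
ps -ef | grep [k]eepalived     # normally shows the parent process plus the VRRP and healthcheck children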

5) Configuration

Enable ip_forward on both the master and the backup:
echo "1" > /proc/sys/net/ipv4/ip_forward

keepalived.conf configuration on the master:
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
    
global_defs {
   router_id LVS_Master
}
    
vrrp_instance VI_1 {
    state MASTER               # initial state of this instance; the actual role is decided by priority. Differs on the backup node
    interface eth0             # interface the virtual IP lives on
    virtual_router_id 51       # VRID; nodes with the same VRID form one group and it determines the multicast MAC address
    priority 100               # priority; set to 90 on the other node. Differs on the backup node
    advert_int 1               # advertisement interval
    authentication {
        auth_type PASS         # authentication type, PASS or AH
        auth_pass 1111         # authentication password
    }
    virtual_ipaddress {
        182.148.15.239         #VIP
    }
}
    
virtual_server 182.148.15.239 80 {
    delay_loop 6               # interval between health-check rounds
    lb_algo wrr                # weighted round robin; LVS scheduling algorithms: rr|wrr|lc|wlc|lblc|sh|dh
    lb_kind DR                 # LVS forwarding mode NAT|DR|TUN; DR requires the director to have an interface on the same segment as the real servers
    #nat_mask 255.255.255.0
    persistence_timeout 50     # session persistence time (seconds)
    protocol TCP               # health-check protocol
    
    ## real server settings; 80 is the service port
    real_server 182.148.15.233 80 {
        weight 3               ## weight
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 182.148.15.238 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

/etc/init.d/keepalived start
ip addr

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:68:dc:b6 brd ff:ff:ff:ff:ff:ff
    inet 182.148.15.237/27 brd 182.148.15.255 scope global eth0
    inet 182.148.15.239/32 scope global eth0
    inet6 fe80::5054:ff:fe68:dcb6/64 scope link
       valid_lft forever preferred_lft forever

ipvsadm -ln

IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  182.148.15.239:80 wrr persistent 50
  -> 182.148.15.233:80            Route   3      0          0      
  -> 182.148.15.238:80            Route   3      0          0


keepalived.conf configuration on the backup:
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
    
global_defs {
   router_id LVS_Backup
}
    
vrrp_instance VI_1 {
    state BACKUP          
    interface eth0         
    virtual_router_id 51   
    priority 90           
    advert_int 1          
    authentication {
        auth_type PASS     
        auth_pass 1111     
    }
    virtual_ipaddress {
        182.148.15.239     
    }
}
    
virtual_server 182.148.15.239 80 {
    delay_loop 6          
    lb_algo wrr           
    lb_kind DR             
    
    persistence_timeout 50 
    protocol TCP         
    
    real_server 182.148.15.233 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 182.148.15.238 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

/etc/init.d/keepalived start
ip addr
Check the LVS_Keepalived_Backup machine: the VIP stays on LVS_Keepalived_Master by default, and only when LVS_Keepalived_Master fails will the VIP resource float over to the backup.

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:7c:b8:f0 brd ff:ff:ff:ff:ff:ff
    inet 182.148.15.236/27 brd 182.148.15.255 scope global eth0
    inet 182.148.15.239/27 brd 182.148.15.255 scope global secondary eth0:0
    inet6 fe80::5054:ff:fe7c:b8f0/64 scope link
       valid_lft forever preferred_lft forever

 ipvsadm -L -n

IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  182.148.15.239:80 wrr persistent 50
  -> 182.148.15.233:80            Route   3      0          0      
  -> 182.148.15.238:80            Route   3      0          0

6) Operations on the back-end real servers

Configure the two domains www.test1.com and www.test2.com on each of the two Real Servers.
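The nginx virtual-host configuration itself is not shown in this article; a minimal sketch of what the two server blocks on Real_Server1 might look like (the root paths and index pages are assumptions, adjust to your own layout):
# inside the http{} block of /usr/local/nginx/conf/nginx.conf
server {
    listen      80;
    server_name www.test1.com;
    root        /var/www/test1;   # index.html contains: this is page of Real_Server1:182.148.15.233 www.test1.com
}
server {
    listen      80;
    server_name www.test2.com;
    root        /var/www/test2;
}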

Both domains can be accessed normally from the master and the backup:

curl http://www.test1.com
this is page of Real_Server2:182.148.15.238 www.test1.com
curl http://www.test2.com
this is page of Real_Server2:182.148.15.238 www.test2.com
 
Stop nginx on Real_Server2; requests for both domains are then directed to Real_Server1:
/usr/local/nginx/sbin/nginx -s stop
lsof -i:80
Accessing the two domains again from the LVS_Keepalived_Master and LVS_Keepalived_Backup machines shows that the traffic has now been load-balanced onto Real_Server1:
curl http://www.test1.com
this is page of Real_Server1:182.148.15.233 www.test1.com
curl http://www.test2.com
this is page of Real_Server1:182.148.15.233 www.test2.com
 
curl http://www.test1.com
this is page of Real_Server1:182.148.15.233 www.test1.com
curl http://www.test2.com
this is page of Real_Server1:182.148.15.233 www.test2.com
 
Additionally, adjust iptables on the two Real Servers so that port 80 is only opened for the VIP resource(s) in front:
 vim /etc/sysconfig/iptables
......
-A INPUT -s 182.148.15.239 -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT

/etc/init.d/iptables restart

7) Testing

Resolve the test domains www.test1.com and www.test2.com to the VIP 182.148.15.239; they can then be accessed normally in a browser.
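For a quick test without touching real DNS, the domains can simply be mapped to the VIP in /etc/hosts on the test client (a temporary, test-only shortcut):
echo "182.148.15.239 www.test1.com www.test2.com" >> /etc/hosts
curl http://www.test1.com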

1) Test the LVS function (the Keepalived LVS configuration above includes health checks: when a back-end server fails it is automatically removed from the LVS cluster, and once it recovers it is automatically added back)
First check the current LVS cluster; port 80 on both back-end Real Servers is running normally:
[root@LVS_Keepalived_Master ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  182.148.15.239:80 wrr persistent 50
  -> 182.148.15.233:80            Route   3      0          0       
  -> 182.148.15.238:80            Route   3      0          0
  
Now stop one Real Server, for example Real_Server2:
[root@Real_Server2 ~]# /usr/local/nginx/sbin/nginx -s stop
  
Check the LVS cluster again after a moment; Real_Server2 has been removed from the cluster:
[root@LVS_Keepalived_Master ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  182.148.15.239:80 wrr persistent 50
  -> 182.148.15.233:80            Route   3      0          0
  
Finally bring Real_Server2's port 80 back up; it is added back into the LVS cluster:
[root@Real_Server2 ~]# /usr/local/nginx/sbin/nginx
  
[root@LVS_Keepalived_Master ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  182.148.15.239:80 wrr persistent 50
  -> 182.148.15.233:80            Route   3      0          0       
  -> 182.148.15.238:80            Route   3      0          0 
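To watch the real server being removed and re-added in real time, something like the following on the director is convenient (refreshes the LVS table every second):
watch -n 1 ipvsadm -Ln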
  
Throughout this test, access to http://www.test1.com and http://www.test2.com was not affected.
  
  
2) Test Keepalived heartbeat failover (high availability)
By default the VIP resource is on LVS_Keepalived_Master:
[root@LVS_Keepalived_Master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:68:dc:b6 brd ff:ff:ff:ff:ff:ff
    inet 182.148.15.237/27 brd 182.148.15.255 scope global eth0
    inet 182.148.15.239/32 scope global eth0
    inet 182.148.15.239/27 brd 182.148.15.255 scope global secondary eth0:0
    inet6 fe80::5054:ff:fe68:dcb6/64 scope link
       valid_lft forever preferred_lft forever
  
Then stop keepalived on LVS_Keepalived_Master; the VIP fails over to LVS_Keepalived_Backup.
[root@LVS_Keepalived_Master ~]# /etc/init.d/keepalived stop
Stopping keepalived:                                       [  OK  ]
[root@LVS_Keepalived_Master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:68:dc:b6 brd ff:ff:ff:ff:ff:ff
    inet 182.148.15.237/27 brd 182.148.15.255 scope global eth0
    inet 182.148.15.239/27 brd 182.148.15.255 scope global secondary eth0:0
    inet6 fe80::5054:ff:fe68:dcb6/64 scope link
       valid_lft forever preferred_lft forever
  
Check the system log on LVS_Keepalived_Master for the related messages:
[root@LVS_Keepalived_Master ~]# tail -f /var/log/messages
.............
May  8 10:19:36 Haproxy_Keepalived_Master Keepalived_healthcheckers[20875]: TCP connection to [182.148.15.233]:80 failed.
May  8 10:19:39 Haproxy_Keepalived_Master Keepalived_healthcheckers[20875]: TCP connection to [182.148.15.233]:80 failed.
May  8 10:19:39 Haproxy_Keepalived_Master Keepalived_healthcheckers[20875]: Check on service [182.148.15.233]:80 failed after 1 retry.
May  8 10:19:39 Haproxy_Keepalived_Master Keepalived_healthcheckers[20875]: Removing service [182.148.15.233]:80 from VS [182.148.15.239]:80
  
[root@LVS_Keepalived_Backup ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:7c:b8:f0 brd ff:ff:ff:ff:ff:ff
    inet 182.148.15.236/27 brd 182.148.15.255 scope global eth0
    inet 182.148.15.239/32 scope global eth0
    inet 182.148.15.239/27 brd 182.148.15.255 scope global secondary eth0:0
    inet6 fe80::5054:ff:fe7c:b8f0/64 scope link
       valid_lft forever preferred_lft forever
  
  
Next, start keepalived on LVS_Keepalived_Master again; the VIP moves back:
[root@LVS_Keepalived_Master ~]# /etc/init.d/keepalived start
Starting keepalived:                                       [  OK  ]
[root@LVS_Keepalived_Master ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:68:dc:b6 brd ff:ff:ff:ff:ff:ff
    inet 182.148.15.237/27 brd 182.148.15.255 scope global eth0
    inet 182.148.15.239/32 scope global eth0
    inet 182.148.15.239/27 brd 182.148.15.255 scope global secondary eth0:0
    inet6 fe80::5054:ff:fe68:dcb6/64 scope link
       valid_lft forever preferred_lft forever
  
  
The system log on LVS_Keepalived_Master shows the VIP being taken back (gratuitous ARP messages):
[root@LVS_Keepalived_Master ~]# tail -f /var/log/messages
.............
May  8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
May  8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: VRRP_Instance(VI_1) Sending/queueing gratuitous ARPs on eth0 for 182.148.15.239
May  8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
May  8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
May  8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
May  8 10:23:12 Haproxy_Keepalived_Master Keepalived_vrrp[5863]: Sending gratuitous ARP on eth0 for 182.148.15.239
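If failover or failback does not happen, it is worth confirming that the VRRP advertisements to the multicast address are actually reaching the peer (this is exactly what the multicast iptables rules in step 1 are for); a simple check on either director:
tcpdump -i eth0 -nn host 224.0.0.18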

Deploying an LVS + Keepalived master/master (dual-master) high-availability environment

Compared with the master/backup setup, the master/master setup differs only in that:
1) The LVS load-balancing layer needs two VIPs, for example 182.148.15.239 and 182.148.15.235.
2) The back-end real servers must bind both VIPs to the lo loopback interface.
3) keepalived.conf differs from the master/backup configuration above.
  
The master/master configuration is as follows:
1) Write the LVS startup scripts (on both Real_Server1 and Real_Server2; the scripts are identical on both machines)
  
Since the back-end real servers have to bind two VIPs to the local loopback interface (to lo:0 and lo:1 respectively), two startup scripts are needed.
 
[root@Real_Server1 ~]# vim /etc/init.d/realserver1
#!/bin/sh
VIP=182.148.15.239
. /etc/rc.d/init.d/functions
    
case "$1" in
  
start)
    /sbin/ifconfig lo down
    /sbin/ifconfig lo up
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    /sbin/sysctl -p >/dev/null 2>&1
    /sbin/ifconfig lo:0 $VIP netmask 255.255.255.255 up  
    /sbin/route add -host $VIP dev lo:0
    echo "LVS-DR real server starts successfully.\n"
    ;;
stop)
    /sbin/ifconfig lo:0 down
    /sbin/route del $VIP >/dev/null 2>&1
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
echo "LVS-DR real server stopped.\n"
    ;;
status)
    isLoOn=`/sbin/ifconfig lo:0 | grep "$VIP"`
    isRoOn=`/bin/netstat -rn | grep "$VIP"`
    if [ "$isLoON" == "" -a "$isRoOn" == "" ]; then
        echo "LVS-DR real server has run yet."
    else
        echo "LVS-DR real server is running."
    fi
    exit 3
    ;;
*)
    echo "Usage: $0 {start|stop|status}"
    exit 1
esac
exit 0
  
  
[root@Real_Server1 ~]# vim /etc/init.d/realserver2
#!/bin/sh
VIP=182.148.15.235
. /etc/rc.d/init.d/functions
    
case "$1" in
  
start)
    /sbin/ifconfig lo down
    /sbin/ifconfig lo up
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
    /sbin/sysctl -p >/dev/null 2>&1
    /sbin/ifconfig lo:1 $VIP netmask 255.255.255.255 up   
    /sbin/route add -host $VIP dev lo:1
    echo "LVS-DR real server starts successfully.\n"
    ;;
stop)
    /sbin/ifconfig lo:1 down
    /sbin/route del $VIP >/dev/null 2>&1
    echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
echo "LVS-DR real server stopped.\n"
    ;;
status)
    isLoOn=`/sbin/ifconfig lo:1 | grep "$VIP"`
    isRoOn=`/bin/netstat -rn | grep "$VIP"`
    if [ "$isLoON" == "" -a "$isRoOn" == "" ]; then
        echo "LVS-DR real server has run yet."
    else
        echo "LVS-DR real server is running."
    fi
    exit 3
    ;;
*)
    echo "Usage: $0 {start|stop|status}"
    exit 1
esac
exit 0
  
Add the LVS scripts to system startup:
[root@Real_Server1 ~]# chmod +x /etc/init.d/realserver1
[root@Real_Server1 ~]# chmod +x /etc/init.d/realserver2
[root@Real_Server1 ~]# echo "/etc/init.d/realserver1 start" >> /etc/rc.d/rc.local
[root@Real_Server1 ~]# echo "/etc/init.d/realserver2 start" >> /etc/rc.d/rc.local
   
Start the LVS scripts:
[root@Real_Server1 ~]# service realserver1 start
LVS-DR real server started successfully.
  
[root@Real_Server1 ~]# service realserver2 start
LVS-DR real server started successfully.
  
Check Real_Server1; both VIPs have been bound successfully to the local loopback interface:
[root@Real_Server1 ~]# ifconfig
eth0      Link encap:Ethernet  HWaddr 52:54:00:D1:27:75
          inet addr:182.148.15.233  Bcast:182.148.15.255  Mask:255.255.255.224
          inet6 addr: fe80::5054:ff:fed1:2775/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:309741 errors:0 dropped:0 overruns:0 frame:0
          TX packets:27993954 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000
          RX bytes:37897512 (36.1 MiB)  TX bytes:23438654329 (21.8 GiB)
   
lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:0 (0.0 b)  TX bytes:0 (0.0 b)
   
lo:0      Link encap:Local Loopback
          inet addr:182.148.15.239  Mask:255.255.255.255
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
  
lo:1      Link encap:Local Loopback
          inet addr:182.148.15.235  Mask:255.255.255.255
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
  
  
2) keepalived.conf configuration
keepalived.conf configuration on the LVS_Keepalived_Master machine
First enable ip_forward routing:
[root@LVS_Keepalived_Master ~]# echo "1" > /proc/sys/net/ipv4/ip_forward
 
[root@LVS_Keepalived_Master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
    
global_defs {
   router_id LVS_Master
}
    
vrrp_instance VI_1 {
    state MASTER             
    interface eth0           
    virtual_router_id 51     
    priority 100             
    advert_int 1             
    authentication {
        auth_type PASS       
        auth_pass 1111       
    }
    virtual_ipaddress {
        182.148.15.239       
    }
}
    
vrrp_instance VI_2 {
    state BACKUP          
    interface eth0         
    virtual_router_id 52  
    priority 90           
    advert_int 1          
    authentication {
        auth_type PASS     
        auth_pass 1111     
    }
    virtual_ipaddress {
        182.148.15.235   
    }
}
  
virtual_server 182.148.15.239 80 {
    delay_loop 6             
    lb_algo wrr              
    lb_kind DR               
    #nat_mask 255.255.255.0
    persistence_timeout 50   
    protocol TCP            
    
  
    real_server 182.148.15.233 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 182.148.15.238 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
  
  
virtual_server 182.148.15.235 80 {
    delay_loop 6             
    lb_algo wrr              
    lb_kind DR               
    #nat_mask 255.255.255.0
    persistence_timeout 50   
    protocol TCP            
    
  
    real_server 182.148.15.233 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 182.148.15.238 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
  
  
  
keepalived.conf configuration on the LVS_Keepalived_Backup machine
[root@LVS_Keepalived_Backup ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
    
global_defs {
   router_id LVS_Backup
}
    
vrrp_instance VI_1 {
    state BACKUP            
    interface eth0           
    virtual_router_id 51     
    priority 90             
    advert_int 1             
    authentication {
        auth_type PASS       
        auth_pass 1111       
    }
    virtual_ipaddress {
        182.148.15.239       
    }
}
    
vrrp_instance VI_2 {
    state MASTER         
    interface eth0         
    virtual_router_id 52 
    priority 100          
    advert_int 1          
    authentication {
        auth_type PASS     
        auth_pass 1111     
    }
    virtual_ipaddress {
        182.148.15.235   
    }
}
  
virtual_server 182.148.15.239 80 {
    delay_loop 6             
    lb_algo wrr              
    lb_kind DR               
    #nat_mask 255.255.255.0
    persistence_timeout 50   
    protocol TCP            
    
  
    real_server 182.148.15.233 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 182.148.15.238 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
  
  
virtual_server 182.148.15.235 80 {
    delay_loop 6             
    lb_algo wrr              
    lb_kind DR               
    #nat_mask 255.255.255.0
    persistence_timeout 50   
    protocol TCP            
    
  
    real_server 182.148.15.233 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 182.148.15.238 80 {
        weight 3
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
  
  
  
All other verification steps are the same as in the master/backup setup above.
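As a quick end-to-end check of the dual-master setup, both VIPs can be probed from a test client (a minimal sketch; the Host headers assume the test domains from the earlier section):
curl -H "Host: www.test1.com" http://182.148.15.239/
curl -H "Host: www.test2.com" http://182.148.15.235/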

