CentOS 7: Building a Highly Available Cluster with HAProxy + Keepalived
Chapter 1 Environment
1.1 Local Operating System Environment
CentOS 7, 64-bit
[root@lb03 ~]# cat /etc/centos-release
CentOS Linux release 7.5.1804 (Core)
[root@lb03 ~]# uname -r
3.10.0-862.el7.x86_64
[root@lb03 ~]# rpm -qa haproxy
haproxy-1.5.18-7.el7.x86_64
[root@lb03 ~]# nginx -V
nginx version: nginx/1.12.2
built by gcc 4.8.5 20150623 (Red Hat 4.8.5-16) (GCC)
built with OpenSSL 1.0.2k-fips 26 Jan 2017
TLS SNI support enabled
HAProxy is installed on 192.168.25.71 and 192.168.25.72 to load-balance the backend RabbitMQ service; in the configurations below, the backend RabbitMQ/nginx nodes are 192.168.25.73, 192.168.25.74, and 192.168.25.75.
Keepalived provides master/backup failover for HAProxy (avoiding a single point of failure): 192.168.25.71 (master), 192.168.25.72 (backup), with the virtual IP (VIP) 192.168.25.229.
For RabbitMQ configuration, see:
http://blog.csdn.net/sj349781478/article/details/78841382
http://blog.csdn.net/sj349781478/article/details/78845852
Chapter 2 HAProxy Installation
2.1 HAProxy Overview
1) HAProxy provides high availability, load balancing, and proxying for TCP- and HTTP-based applications, with support for virtual hosts; it is a free, fast, and reliable solution.
2) HAProxy is particularly well suited to heavily loaded web sites, which typically need session persistence or Layer 7 processing.
3) HAProxy runs on commodity hardware and can support tens of thousands of concurrent connections. Its operating model makes it simple and safe to integrate into an existing architecture, while keeping your web servers from being exposed directly to the network.
4) HAProxy implements an event-driven, single-process model that supports very large numbers of concurrent connections. Multi-process and multi-threaded models are constrained by memory limits, scheduler limits, and pervasive locking, and rarely handle thousands of concurrent connections well. The event-driven model avoids these problems because all of the work is done in user space, where resources and time can be managed more precisely. Its drawback is that such programs usually scale poorly on multi-core systems, which is why they must be optimized to get more work out of each CPU cycle.
2.2 HAProxy Installation and Configuration
2.2.1 HAProxy Configuration for Proxying RabbitMQ
[root@lb01 haproxy]# cat haproxy.cfg
########### Global configuration #########
global
# log /dev/log local0
# log /dev/log local1 notice
log 127.0.0.1 local0 info
chroot /var/lib/haproxy # change the working directory
stats socket /run/haproxy/admin.sock mode 660 level admin # socket used for monitoring/administration
pidfile /var/run/haproxy.pid # path of haproxy's pid file; the user starting the process must be able to access it
maxconn 4000 # maximum number of connections (default 4000)
user haproxy # run as this user
group haproxy # run as this group
daemon # fork into the background and run in daemon mode
# Default SSL material locations
ca-base /etc/ssl/certs
crt-base /etc/ssl/private
# Default ciphers to use on SSL-enabled listening sockets.
# For more information, see ciphers(1SSL). This list is from:
# https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS
ssl-default-bind-options no-sslv3
########### Defaults #########
defaults
log global
mode http # default mode { tcp|http|health }: tcp is Layer 4, http is Layer 7, health only returns OK
option httplog # use the HTTP log format
option dontlognull # with this enabled, "null connections" are not logged. A null connection is one made
# by an upstream load balancer or a monitoring system that periodically connects,
# fetches a fixed component or page, or probes whether a port is listening, just
# to check that the service is alive. The official documentation notes that if
# there is no upstream load balancer in front of this service, it is better not
# to use this option, because malicious scans and other probes from the Internet
# would then go unlogged.
timeout connect 5000 # connect timeout
timeout client 50000 # client inactivity timeout
timeout server 50000 # server inactivity timeout
option httpclose # actively close the HTTP connection after each request
#option forwardfor # needed if the backend servers must see the real client IP, which can then be read from the HTTP headers
option redispatch # if the server bound to a serverId goes down, force the session to another healthy server
maxconn 60000 # maximum number of connections
retries 3 # consider the server unavailable after 3 failed connection attempts; can be overridden per backend
# errorfile 400 /etc/haproxy/errors/400.http
# errorfile 403 /etc/haproxy/errors/403.http
# errorfile 408 /etc/haproxy/errors/408.http
# errorfile 500 /etc/haproxy/errors/500.http
# errorfile 502 /etc/haproxy/errors/502.http
# errorfile 503 /etc/haproxy/errors/503.http
# errorfile 504 /etc/haproxy/errors/504.http
####################################################################
listen http_front
bind 0.0.0.0:1080
stats refresh 30s
stats uri /haproxy?stats
stats realm Haproxy\ Manager
stats auth admin:admin
#stats hide-version
##################### The RabbitMQ management UI is also placed behind HAProxy ###############################
listen rabbitmq_admin
bind 0.0.0.0:8004
server node1 192.168.25.73:15672
server node2 192.168.25.74:15672
server node3 192.168.25.75:15672
####################################################################
listen rabbitmq_cluster
bind 0.0.0.0:80
option tcplog
mode tcp
timeout client 3h
timeout server 3h
option clitcpka
balance roundrobin
#balance url_param userid
#balance url_param session_id check_post 64
#balance hdr(User-Agent)
#balance hdr(host)
#balance hdr(Host) use_domain_only
#balance rdp-cookie
#balance leastconn
#balance source # balance by client source IP
# 5672 is RabbitMQ's AMQP port; the management UI (port 15672) is already proxied by rabbitmq_admin above
server node1 192.168.25.73:5672 check inter 5s rise 2 fall 3
server node2 192.168.25.74:5672 check inter 5s rise 2 fall 3
server node3 192.168.25.75:5672 check inter 5s rise 2 fall 3
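Note that the global section above logs to 127.0.0.1 via the local0 facility, so rsyslog must be listening for UDP syslog on localhost or the log lines will go nowhere. A minimal sketch (the drop-in file name haproxy.conf is just a convention):
# /etc/rsyslog.d/haproxy.conf
$ModLoad imudp # load the UDP syslog input module
$UDPServerAddress 127.0.0.1 # accept logs from localhost only
$UDPServerRun 514 # listen on udp/514
local0.* /var/log/haproxy.log # write haproxy's local0 messages here
Then restart rsyslog: systemctl restart rsyslog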
2.2.2 HAProxy Proxying nginx
1. Install haproxy
# yum install haproxy -y
2. Edit the configuration file
[root@lb02 ~]# grep -Ev '^$|^#' /etc/haproxy/haproxy.cfg
global
# to have these messages end up in /var/log/haproxy.log you will
# need to:
#
# 1) configure syslog to accept network log events. This is done
# by adding the '-r' option to the SYSLOGD_OPTIONS in
# /etc/sysconfig/syslog
#
# 2) configure local2 events to go to the /var/log/haproxy.log
# file. A line like the following can be added to
# /etc/sysconfig/syslog
#
# local2.* /var/log/haproxy.log
#
log 127.0.0.1 local0 info
#chroot /var/lib/haproxy
#pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
#stats socket /var/lib/haproxy/stats
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
frontend main
bind *:80
acl url_static path_beg -i /static /images /javascript /stylesheets
acl url_static path_end -i .jpg .gif .png .css .js
use_backend static if url_static
default_backend nginx
backend static
balance roundrobin
server static 127.0.0.1:80 check # note: with the frontend bound to *:80 this loops back to haproxy itself; point it at a real static-content server/port
backend nginx
balance roundrobin
server nginx1 192.168.25.73:80 check inter 2000 fall 3 weight 30
server nginx2 192.168.25.74:80 check inter 2000 fall 3 weight 30
server nginx3 192.168.25.75:80 check inter 2000 fall 3 weight 30
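Before starting or restarting, the file can be validated with haproxy's -c (check) flag, for example:
# check the configuration for syntax errors without starting the proxy
haproxy -c -f /etc/haproxy/haproxy.cfg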
3. Start
# haproxy -f /etc/haproxy/haproxy.cfg
4. Restart
# service haproxy restart
5. Check whether haproxy is running
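A few equivalent ways to confirm the process is up, for example:
systemctl status haproxy # service state under systemd
ps -ef | grep haproxy # the haproxy process itself
ss -lntp | grep haproxy # the configured bind ports (e.g. *:80) should be listening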
6. Enable the haproxy stats page
Edit haproxy.cfg and add the following section:
listen admin_stats
stats enable
bind *:9090 # listening IP and port
mode http # Layer 7 HTTP mode
option httplog
log global
maxconn 10
stats refresh 30s # auto-refresh interval of the stats page
stats uri /admin # URI of the stats page, i.e. http://ip:9090/admin
stats realm haproxy
stats auth admin:Redhat # username and password for authentication
stats hide-version # hide the HAProxy version
stats admin if TRUE # after successful authentication, nodes can be managed through the web UI
After saving and exiting, restart haproxy:
service haproxy restart
Then browse to http://192.168.25.72:9090/admin (username: admin, password: Redhat).
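The page can also be fetched non-interactively with curl and HTTP basic auth, using the admin:Redhat credentials configured above; appending ;csv returns the statistics in CSV form, which is handy for scripts:
curl -u admin:Redhat http://192.168.25.72:9090/admin
curl -u admin:Redhat "http://192.168.25.72:9090/admin;csv"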
---------------------------------------------------------------------------------------------
Annotated example configuration (/usr/local/haproxy/haproxy.cfg):
########### Global configuration #########
global
log 127.0.0.1 local0 # log output: all logs are kept on this host, emitted via the local0 facility
log 127.0.0.1 local1 notice # haproxy log level [err warning info debug]
daemon # run haproxy in the background
nbproc 1 # number of processes
maxconn 4096 # default maximum number of connections; mind the ulimit -n limit
#user haproxy # user that runs haproxy
#group haproxy # group of the user that runs haproxy
#pidfile /var/run/haproxy.pid # haproxy PID file
#ulimit-n 819200 # file-descriptor (ulimit -n) limit
#chroot /usr/share/haproxy # chroot directory
#debug # haproxy debug mode; recommended only when running a single process
#quiet
######## Defaults ############
defaults
log global
mode http # default mode { tcp|http|health }: tcp is Layer 4, http is Layer 7, health only returns OK
option httplog # log format: httplog
option dontlognull # do not log health-check (null) connections
retries 2 # consider the server unavailable after 2 failed connection attempts; can be overridden per backend
#option forwardfor # needed if the backend servers must see the real client IP, readable from the HTTP headers
option httpclose # actively close the HTTP connection after each request; in this mode haproxy does not keep connections alive and can only emulate keep-alive
#option redispatch # when the server bound to a serverId dies, force the session to another healthy server; said to be unsupported in later versions
option abortonclose # under high load, abort requests that have sat in the queue for a long time once the client has given up
maxconn 4096 # default maximum number of connections
timeout connect 5000ms # connect timeout
timeout client 30000ms # client timeout
timeout server 30000ms # server timeout
#timeout check 2000 # health-check timeout
#timeout http-keep-alive 10s # default keep-alive timeout
#timeout http-request 10s # default HTTP request timeout
#timeout queue 1m # default queue timeout
balance roundrobin # default load-balancing algorithm: round robin
#balance source # balance by source IP, similar to nginx's ip_hash
#balance leastconn # balance by least number of connections
######## Stats page ########
listen stats
bind 0.0.0.0:1080 # a listen section combines a frontend and a backend; name the monitoring group as needed
mode http # Layer 7 HTTP mode
option httplog # use the HTTP log format
#log 127.0.0.1 local0 err # log errors only
maxconn 10 # default maximum number of connections
stats refresh 30s # auto-refresh interval of the stats page
stats uri /stats # URL of the stats page
stats realm XingCloud\ Haproxy # prompt text shown in the stats page login dialog
stats auth admin:admin # stats page user and password (admin); several users can be configured
stats auth Frank:Frank # stats page user and password (Frank)
stats hide-version # hide the HAProxy version on the stats page
stats admin if TRUE # allow enabling/disabling backend servers from the page (haproxy 1.4.9 and later)
######## haproxy error pages #####
#errorfile 403 /home/haproxy/haproxy/errorfiles/403.http
#errorfile 500 /home/haproxy/haproxy/errorfiles/500.http
#errorfile 502 /home/haproxy/haproxy/errorfiles/502.http
#errorfile 503 /home/haproxy/haproxy/errorfiles/503.http
#errorfile 504 /home/haproxy/haproxy/errorfiles/504.http
######## Frontend configuration ##############
frontend main
bind *:80 # binding with *:80 is recommended; otherwise, in a high-availability setup, the service becomes unreachable once the VIP moves to another machine.
acl web hdr(host) -i www.abc.com # "acl" is followed by the rule name; -i makes the match case-insensitive; the last argument is the host to match. A request for www.abc.com triggers the "web" rule.
acl img hdr(host) -i img.abc.com # a request for img.abc.com triggers the "img" rule.
use_backend webserver if web # if the "web" rule matched (www.abc.com), dispatch the request to the "webserver" backend.
use_backend imgserver if img # if the "img" rule matched (img.abc.com), dispatch the request to the "imgserver" backend.
default_backend dynamic # otherwise fall back to the default backend.
######## Backend configuration ##############
backend webserver # the "webserver" backend
mode http
balance roundrobin # round robin; "balance source" keeps sessions by source IP; static-rr, leastconn, first, uri, etc. are also supported
option httpchk GET /index.html HTTP/1.0 # health check: if index.html cannot be fetched from a backend, stop dispatching to it
server web1 10.16.0.9:8085 cookie 1 weight 5 check inter 2000 rise 2 fall 3
server web2 10.16.0.10:8085 cookie 2 weight 3 check inter 2000 rise 2 fall 3
#cookie 1 sets serverid 1; check inter 2000 is the health-check interval
#rise 2: two consecutive successes mark a server up; fall 3: three failures mark it down; weight is the relative weight
backend imgserver
mode http
option httpchk /index.php
balance roundrobin
server img01 192.168.137.101:80 check inter 2000 fall 3
server img02 192.168.137.102:80 check inter 2000 fall 3
backend dynamic
balance roundrobin
server test1 192.168.1.23:80 check maxconn 2000
server test2 192.168.1.24:80 check maxconn 2000
listen tcptest
bind 0.0.0.0:5222
mode tcp
option tcplog # use the TCP log format
balance source
#log 127.0.0.1 local0 debug
server s1 192.168.100.204:7222 weight 1
server s2 192.168.100.208:7222 weight 1
---------------------------------------------------------------------------------------------
Chapter 3 Keepalived Installation and Configuration
3.1 Keepalived Overview
Keepalived is free, open-source software written in C that behaves much like a Layer 3, 4 & 7 switch, providing the functions we usually associate with L3/L4/L7 switching. Its two main features are load balancing and high availability: load balancing relies on the Linux virtual server kernel module (IPVS), while high availability uses the VRRP protocol to fail services over between machines.
3.2 Configure the Firewall
3.2.1 CentOS 7 firewalld: Allowing Keepalived's VRRP (VIP) Traffic
Allow keepalived's VRRP advertisements so the VIP can be negotiated:
firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0 --in-interface eth0 --destination 224.0.0.18 --protocol vrrp -j ACCEPT
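To confirm that VRRP advertisements are actually arriving (useful when a VIP refuses to fail over), watch for them with tcpdump; VRRP is IP protocol 112, multicast to 224.0.0.18:
tcpdump -i eth0 -n 'ip proto 112'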
3.2.2 Common firewalld Operations
Syntax for enabling a port/protocol combination in a zone:
firewall-cmd [--zone=<zone>] --add-port=<port>[-<port>]/<protocol> [--timeout=<seconds>]
This enables the given combination of port and protocol.
The port can be a single port <port> or a port range <port>-<port>.
The protocol can be tcp or udp.
Check firewalld status:
systemctl status firewalld
Start firewalld:
systemctl start firewalld
Open ports:
# --permanent makes the rule persistent; without it, the rule is lost after a restart
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --zone=public --add-port=9090/tcp --permanent
firewall-cmd --zone=public --add-port=1000-2000/tcp --permanent
Allow keepalived's VRRP traffic (same rule as above):
firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0 --in-interface eth0 --destination 224.0.0.18 --protocol vrrp -j ACCEPT
Reload:
firewall-cmd --reload
Query:
firewall-cmd --zone=public --query-port=80/tcp
Remove:
firewall-cmd --zone=public --remove-port=80/tcp --permanent
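To verify the result, list what is currently open, including the direct VRRP rule added above:
firewall-cmd --zone=public --list-ports # ports opened in the public zone
firewall-cmd --direct --get-all-rules # direct rules, e.g. the VRRP ACCEPT rule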
iptables firewall
You can also revert to the traditional management approach and use iptables:
systemctl stop firewalld
systemctl mask firewalld
Install iptables-services:
yum install iptables-services
Enable at boot:
systemctl enable iptables
Service commands:
systemctl stop iptables
systemctl start iptables
systemctl restart iptables
systemctl reload iptables
Save the rules:
service iptables save
To open a port, add a rule like the following to /etc/sysconfig/iptables:
-A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 8080 -j ACCEPT
3.3 Install Keepalived
yum install keepalived -y
systemctl enable keepalived
3.4 Configuration Files
3.4.1 Master Node
3.4.1.1 keepalived.conf
[root@lb01 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
router_id LB01
}
vrrp_script chk_haproxy
{
script "/etc/keepalived/scripts/haproxy_check.sh"
interval 2 # run the check every 2 seconds
timeout 2 # consider the check failed after 2 seconds
fall 3 # 3 consecutive failures put the instance into FAULT state
}
vrrp_instance haproxy {
state MASTER # initial state
interface eth0 # interface that carries VRRP traffic and the VIP
virtual_router_id 1 # must be identical on master and backup
priority 100 # higher priority wins the MASTER election (the backup uses 50)
authentication {
auth_type PASS
auth_pass password
}
virtual_ipaddress {
192.168.25.229
}
track_script {
chk_haproxy
}
notify_master "/etc/keepalived/scripts/haproxy_master.sh"
}
3.4.1.2 haproxy_check.sh
[root@lb01 ~]# cat /etc/keepalived/scripts/haproxy_check.sh
#!/bin/bash
LOGFILE="/var/log/keepalived-haproxy-state.log"
date >>$LOGFILE
if [ `ps -C haproxy --no-header |wc -l` -eq 0 ];then
echo "fail: check_haproxy status" >>$LOGFILE
exit 1
else
echo "success: check_haproxy status" >>$LOGFILE
exit 0
fi
3.4.1.3 haproxy_master.sh
[root@lb01 ~]# cat /etc/keepalived/scripts/haproxy_master.sh
#!/bin/bash
LOGFILE="/var/log/keepalived-haproxy-state.log"
echo "Being Master ..." >> $LOGFILE
3.4.2 Backup Node
3.4.2.1 keepalived.conf
[root@lb02 ~]# cat /etc/keepalived/keepalived.conf
global_defs {
router_id LB02
}
vrrp_script chk_haproxy
{
script "/etc/keepalived/scripts/haproxy_check.sh"
interval 2
timeout 2
fall 3
}
vrrp_instance haproxy {
state BACKUP
interface eth0
virtual_router_id 1
priority 50
authentication {
auth_type PASS
auth_pass password
}
virtual_ipaddress {
192.168.25.229
}
track_script {
chk_haproxy
}
notify_master "/etc/keepalived/scripts/haproxy_master.sh"
}
[root@lb02 ~]#
3.4.2.2 haproxy_check.sh
[root@lb02 ~]# cat /etc/keepalived/scripts/haproxy_check.sh
#!/bin/bash
LOGFILE="/var/log/keepalived-haproxy-state.log"
date >>$LOGFILE
if [ `ps -C haproxy --no-header |wc -l` -eq 0 ];then
echo "fail: check_haproxy status" >>$LOGFILE
exit 1
else
echo "success: check_haproxy status" >>$LOGFILE
exit 0
fi
[root@lb02 ~]#
3.4.2.3 haproxy_master.sh
[root@lb02 ~]# cat /etc/keepalived/scripts/haproxy_master.sh
#!/bin/bash
LOGFILE="/var/log/keepalived-haproxy-state.log"
echo "Being Master ..." >> $LOGFILE
[root@lb02 ~]#
3.5 Start Keepalived
3.5.1 Start the Service
# keepalived -D (start the daemon directly, or use systemd:)
systemctl start keepalived
3.5.2 Check Whether Keepalived Is Running
[root@lb01 ~]# ps -ef|grep kee
root 24290 1 0 10:59 ? 00:00:00 /usr/sbin/keepalived -D
root 24291 24290 0 10:59 ? 00:00:00 /usr/sbin/keepalived -D
root 24292 24290 0 10:59 ? 00:00:00 /usr/sbin/keepalived -D
root 28622 13717 0 11:17 pts/1 00:00:00 grep --color=auto kee
[root@lb01 ~]#
3.5.3 VIP Check
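The node currently in MASTER state (lb01 at this point) should hold the VIP on eth0. A quick check, with the output expected while lb01 is master:
[root@lb01 ~]# ip a | grep 192.168.25.229
inet 192.168.25.229/32 scope global eth0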
3.6 Failover Tests
3.6.1 Stop Keepalived on the Master
Stop the keepalived service on the master, check that the VIP moves to the backup and that the service keeps working, then restore the original state. (This validates keepalived high availability.)
Master
Keepalived log:
[root@lb01 ~]# systemctl stop keepalived
Nov 22 11:19:49 lb01 systemd: Stopping LVS and VRRP High Availability Monitor...
Nov 22 11:19:49 lb01 Keepalived[24290]: Stopping
Nov 22 11:19:49 lb01 Keepalived_vrrp[24292]: VRRP_Instance(haproxy) sent 0 priority
Nov 22 11:19:49 lb01 Keepalived_vrrp[24292]: VRRP_Instance(haproxy) removing protocol VIPs.
Nov 22 11:19:49 lb01 Keepalived_healthcheckers[24291]: Stopped
Nov 22 11:19:50 lb01 Keepalived_vrrp[24292]: Stopped
Nov 22 11:19:50 lb01 Keepalived[24290]: Stopped Keepalived v1.3.5 (03/19,2017), git commit v1.3.5-6-g6fa32f2
Nov 22 11:19:50 lb01 systemd: Stopped LVS and VRRP High Availability Monitor.
Backup
[root@lb02 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:0b:29:c6 brd ff:ff:ff:ff:ff:ff
inet 192.168.25.72/16 brd 192.168.255.255 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.25.229/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::5054:ff:fe0b:29c6/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:1a:83:4d brd ff:ff:ff:ff:ff:ff
inet6 fe80::5054:ff:fe1a:834d/64 scope link
valid_lft forever preferred_lft forever
Keepalived log:
Nov 22 11:19:51 lb02 Keepalived_vrrp[26670]: VRRP_Instance(haproxy) Entering MASTER STATE
Nov 22 11:19:51 lb02 Keepalived_vrrp[26670]: VRRP_Instance(haproxy) setting protocol VIPs.
Nov 22 11:19:51 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:19:51 lb02 Keepalived_vrrp[26670]: VRRP_Instance(haproxy) Sending/queueing gratuitous ARPs on eth0 for 192.168.25.229
Nov 22 11:19:51 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:19:51 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:19:51 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:19:51 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:19:56 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:19:56 lb02 Keepalived_vrrp[26670]: VRRP_Instance(haproxy) Sending/queueing gratuitous ARPs on eth0 for 192.168.25.229
Nov 22 11:19:56 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:19:56 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:19:56 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:19:56 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
3.6.2 Stop HAProxy on the Master
Stop HAProxy on the master, check that the VIP moves to the backup and that the service keeps working, then restore the original state. (This validates HAProxy high availability.)
Master
System log:
Nov 22 11:29:43 lb01 systemd: Stopping HAProxy Load Balancer...
Nov 22 11:29:43 lb01 systemd: haproxy.service: main process exited, code=exited, status=143/n/a
Nov 22 11:29:43 lb01 systemd: Stopped HAProxy Load Balancer.
Nov 22 11:29:43 lb01 systemd: Unit haproxy.service entered failed state.
Nov 22 11:29:43 lb01 systemd: haproxy.service failed.
Nov 22 11:29:44 lb01 Keepalived_vrrp[29295]: /etc/keepalived/scripts/haproxy_check.sh exited with status 1
Nov 22 11:29:46 lb01 Keepalived_vrrp[29295]: /etc/keepalived/scripts/haproxy_check.sh exited with status 1
Nov 22 11:29:48 lb01 Keepalived_vrrp[29295]: /etc/keepalived/scripts/haproxy_check.sh exited with status 1
Nov 22 11:29:48 lb01 Keepalived_vrrp[29295]: VRRP_Script(chk_haproxy) failed
Nov 22 11:29:48 lb01 Keepalived_vrrp[29295]: VRRP_Instance(haproxy) Entering FAULT STATE
Nov 22 11:29:48 lb01 Keepalived_vrrp[29295]: VRRP_Instance(haproxy) removing protocol VIPs.
Nov 22 11:29:48 lb01 Keepalived_vrrp[29295]: VRRP_Instance(haproxy) Now in FAULT state
Nov 22 11:29:50 lb01 Keepalived_vrrp[29295]: /etc/keepalived/scripts/haproxy_check.sh exited with status 1
Nov 22 11:29:52 lb01 Keepalived_vrrp[29295]: /etc/keepalived/scripts/haproxy_check.sh exited with status 1
Nov 22 11:29:54 lb01 Keepalived_vrrp[29295]: /etc/keepalived/scripts/haproxy_check.sh exited with status 1
Backup
System log:
Nov 22 11:29:49 lb02 Keepalived_vrrp[26670]: VRRP_Instance(haproxy) Transition to MASTER STATE
Nov 22 11:29:50 lb02 Keepalived_vrrp[26670]: VRRP_Instance(haproxy) Entering MASTER STATE
Nov 22 11:29:50 lb02 Keepalived_vrrp[26670]: VRRP_Instance(haproxy) setting protocol VIPs.
Nov 22 11:29:50 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:29:50 lb02 Keepalived_vrrp[26670]: VRRP_Instance(haproxy) Sending/queueing gratuitous ARPs on eth0 for 192.168.25.229
Nov 22 11:29:50 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:29:50 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:29:50 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:29:50 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:29:55 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:29:55 lb02 Keepalived_vrrp[26670]: VRRP_Instance(haproxy) Sending/queueing gratuitous ARPs on eth0 for 192.168.25.229
Nov 22 11:29:55 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:29:55 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:29:55 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
Nov 22 11:29:55 lb02 Keepalived_vrrp[26670]: Sending gratuitous ARP on eth0 for 192.168.25.229
VIP check
Master
[root@lb01 ~]# ip a|grep 192.168.25.229
[root@lb01 ~]#
Backup
[root@lb02 ~]# ip a|grep 192.168.25.229
inet 192.168.25.229/32 scope global eth0
3.6.3 Stop a Backend nginx Server
Stop nginx on backend node nginx1 and check that the service keeps working. (This validates HAProxy's health checks.)
[root@lb03 ~]# systemctl stop nginx
Nov 22 11:34:50 localhost haproxy[31563]: Server nginx/nginx1 is DOWN, reason: Layer4 connection problem, info: "Connection refused", check duration: 0ms. 2 active and 0 backup servers left. 0 sessions active, 0 requeued, 0 remaining in queue.
[root@lb03 ~]# systemctl start nginx
Nov 22 11:36:10 localhost haproxy[31563]: Server nginx/nginx1 is UP, reason: Layer4 check passed, check duration: 0ms. 3 active and 0 backup servers online. 0 sessions requeued, 0 total in queue.
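In each of these tests, "the service keeps working" can be verified by requesting the application through the VIP, for example:
# should keep returning a 200 response before, during, and after each failover
curl -I http://192.168.25.229/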
Keepalived reference: http://blog.51cto.com/lanlian/1303195
HAProxy stats page configuration reference: http://blog.csdn.net/dylan_csdn/article/details/51261421
