Keepalived+Nginx 實現高可用集群


Keepalived+Nginx 高可用集群(主從模式)

集群架構圖:

說明:Keepalived機器同樣是nginx負載均衡器。

1)實驗環境准備(此處都是使用的centos7系統)

# cat /etc/redhat-release 
CentOS Linux release 7.4.1708 (Core)

在所有節點上面進行配置

# systemctl stop firewalld         //關閉防火牆
# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux        //關閉selinux,重啟生效
# setenforce 0                //關閉selinux,臨時生效
# ntpdate 0.centos.pool.ntp.org    //時間同步
# yum install nginx -y           //安裝nginx

2)配置后端web服務器(兩台一樣)

# echo "`hostname` `ifconfig ens33 |sed -n 's#.*inet \(.*\)netmask.*#\1#p'`" > /usr/share/nginx/html/index.html        //准備測試文件,此處是將主機名和ip寫到index.html頁面中
# vim /etc/nginx/nginx.conf        //編輯配置文件
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    access_log  /var/log/nginx/access.log  main;
    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;
    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;
    include /etc/nginx/conf.d/*.conf;
    server {
        listen       80;
        server_name  www.mtian.org;
        location / {
            root         /usr/share/nginx/html;
        }
    access_log    /var/log/nginx/access.log main;
    }
}
# systemctl start nginx    //啟動nginx
# systemctl enable nginx    //加入開機啟動

3)配置LB服務器(兩台都一樣)

# vim /etc/nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    access_log  /var/log/nginx/access.log  main;
    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;
    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;
    include /etc/nginx/conf.d/*.conf;
    upstream backend {
    server 192.168.1.33:80 weight=1 max_fails=3 fail_timeout=20s;
    server 192.168.1.34:80 weight=1 max_fails=3 fail_timeout=20s;
    }
    server {
        listen       80;
        server_name  www.mtian.org;
        location / {
        proxy_pass http://backend;
        proxy_set_header Host $host:$proxy_port;
        proxy_set_header X-Forwarded-For $remote_addr;
        }
    }
}
# systemctl start nginx     //啟動nginx      
# systemctl enable nginx    //加入開機自啟動

4)在測試機(192.168.1.35)上面添加host解析,並測試lb集群是否正常。(測試機任意都可以,只要能訪問lb節點。)

[root@node01 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.32    www.mtian.org
192.168.1.31    www.mtian.org
// 測試時候輪流關閉lb1 和 lb2 節點,關閉后還是能夠訪問並看到輪循效果即表示 nginx lb集群搭建成功。
[root@node01 ~]# curl www.mtian.org
web01 192.168.1.33  
[root@node01 ~]# curl www.mtian.org
web02 192.168.1.34  
[root@node01 ~]# curl www.mtian.org
web01 192.168.1.33  
[root@node01 ~]# curl www.mtian.org
web02 192.168.1.34  
[root@node01 ~]# curl www.mtian.org
web01 192.168.1.33  
[root@node01 ~]# curl www.mtian.org
web02 192.168.1.34

5)上面步驟成功后,開始搭建keepalived,在兩台 lb節點上面安裝keepalived(也可以源碼編譯安裝、此處直接使用yum安裝)

# yum install keepalived -y

6)配置 LB-01節點

[root@LB-01 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
    381347268@qq.com
   }
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    192.168.1.110/24 dev ens33 label ens33:1
    }
}
[root@LB-01 ~]# systemctl start keepalived     //啟動keepalived
[root@LB-01 ~]# systemctl enable keepalived    //加入開機自啟動
[root@LB-01 ~]# ip a    //查看IP,會發現多出了VIP 192.168.1.110
......
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:94:17:44 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.31/24 brd 192.168.1.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.110/24 scope global secondary ens33:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe94:1744/64 scope link 
       valid_lft forever preferred_lft forever
......

7)配置 LB-02節點

[root@LB-02 ~]# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
    381347268@qq.com
   }
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    192.168.1.110/24 dev ens33 label ens33:1
    }
}
[root@LB-02 ~]# systemctl start keepalived        //啟動keepalived
[root@LB-02 ~]# systemctl enable keepalived    //加入開機自啟動
[root@LB-02 ~]# ifconfig   //查看IP,此時備節點不會有VIP(只有當主掛了的時候,VIP才會飄到備節點)
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.32  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::20c:29ff:feab:6532  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:ab:65:32  txqueuelen 1000  (Ethernet)
        RX packets 43752  bytes 17739987 (16.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 4177  bytes 415805 (406.0 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
......

8)在測試機器上面訪問 Keepalived上面配置的VIP 192.168.1.110

[root@node01 ~]# curl 192.168.1.110
web01 192.168.1.33  
[root@node01 ~]# curl 192.168.1.110
web02 192.168.1.34  
[root@node01 ~]# curl 192.168.1.110
web01 192.168.1.33  
[root@node01 ~]# curl 192.168.1.110
web02 192.168.1.34 
//關閉LB-01 節點上面keepalived主節點。再次訪問
[root@LB-01 ~]# systemctl stop keepalived
[root@node01 ~]# 
[root@node01 ~]# curl 192.168.1.110
web01 192.168.1.33  
[root@node01 ~]# curl 192.168.1.110
web02 192.168.1.34  
[root@node01 ~]# curl 192.168.1.110
web01 192.168.1.33  
[root@node01 ~]# curl 192.168.1.110
web02 192.168.1.34 
//此時查看LB-01 主節點上面的IP ,發現已經沒有了 VIP
[root@LB-01 ~]# ifconfig 
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.31  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::20c:29ff:fe94:1744  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:94:17:44  txqueuelen 1000  (Ethernet)
        RX packets 46813  bytes 18033403 (17.1 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 9350  bytes 1040882 (1016.4 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
...
//查看LB-02 備節點上面的IP,發現 VIP已經成功飄過來了
[root@LB-02 ~]# ifconfig 
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.32  netmask 255.255.255.0  broadcast 192.168.1.255
        inet6 fe80::20c:29ff:feab:6532  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:ab:65:32  txqueuelen 1000  (Ethernet)
        RX packets 44023  bytes 17760070 (16.9 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 4333  bytes 430037 (419.9 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

ens33:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.1.110  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 00:0c:29:ab:65:32  txqueuelen 1000  (Ethernet)
...

到此,Keepalived+Nginx高可用集群(主從)就搭建完成了。

Keepalived+Nginx 高可用集群(雙主模式)

將keepalived做成雙主模式,其實很簡單,就是再配置一段新的vrrp_instance(實例)規則,主上面加配置一個從的實例規則,從上面加配置一個主的實例規則。

集群架構圖:

說明:還是按照上面的環境繼續做實驗,只是修改LB節點上面的keepalived服務的配置文件即可。此時LB-01節點即為Keepalived的主節點也為備節點,LB-02節點同樣即為Keepalived的主節點也為備節點。LB-01節點默認的主節點VIP(192.168.1.110),LB-02節點默認的主節點VIP(192.168.1.210)

1)配置 LB-01 節點

[root@LB-01 ~]# vim /etc/keepalived/keepalived.conf   //編輯配置文件,增加一段新的vrrp_instance規則
! Configuration File for keepalived

global_defs {
   notification_email {
    381347268@qq.com
   }
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    192.168.1.110/24 dev ens33 label ens33:1
    }
}

vrrp_instance VI_2 {
    state BACKUP
    interface ens33
    virtual_router_id 52
    priority 100
    advert_int 1
    authentication {
    auth_type PASS
    auth_pass 2222
    }
    virtual_ipaddress {
    192.168.1.210/24 dev ens33 label ens33:2
    }
}
[root@LB-01 ~]# systemctl restart keepalived    //重新啟動keepalived
// 查看LB-01 節點的IP地址,發現VIP(192.168.1.110)同樣還是默認在該節點
[root@LB-01 ~]# ip a
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:94:17:44 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.31/24 brd 192.168.1.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.110/24 scope global secondary ens33:1
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe94:1744/64 scope link 
       valid_lft forever preferred_lft forever

2)配置 LB-02 節點

[root@LB-02 ~]# vim /etc/keepalived/keepalived.conf    //編輯配置文件,增加一段新的vrrp_instance規則
! Configuration File for keepalived

global_defs {
   notification_email {
    381347268@qq.com
   }
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    192.168.1.110/24 dev ens33 label ens33:1
    }
}

vrrp_instance VI_2 {
    state MASTER
    interface ens33
    virtual_router_id 52
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 2222
    }
    virtual_ipaddress {
        192.168.1.210/24 dev ens33 label ens33:2
    }   
}
[root@LB-02 ~]# systemctl restart keepalived    //重新啟動keepalived
// 查看LB-02節點IP,會發現也多了一個VIP(192.168.1.210),此時該節點也就是一個主了。
[root@LB-02 ~]# ip a
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:ab:65:32 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.32/24 brd 192.168.1.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.210/24 scope global secondary ens33:2
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:feab:6532/64 scope link 
       valid_lft forever preferred_lft forever

3)測試

[root@node01 ~]# curl 192.168.1.110
web01 192.168.1.33  
[root@node01 ~]# curl 192.168.1.110
web02 192.168.1.34  
[root@node01 ~]# curl 192.168.1.210
web01 192.168.1.33  
[root@node01 ~]# curl 192.168.1.210
web02 192.168.1.34
// 停止LB-01節點的keepalived再次測試
[root@LB-01 ~]# systemctl stop keepalived
[root@node01 ~]# curl 192.168.1.110
web01 192.168.1.33  
[root@node01 ~]# curl 192.168.1.110
web02 192.168.1.34  
[root@node01 ~]# curl 192.168.1.210
web01 192.168.1.33  
[root@node01 ~]# curl 192.168.1.210
web02 192.168.1.34

測試可以發現我們訪問keepalived中配置的兩個VIP都可以正常調度等,當我們停止任意一台keepalived節點,同樣還是正常訪問;到此,keepalived+nginx高可用集群(雙主模式)就搭建完成了。

 


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM