- 第一步mariadb主从配置
- 第二步ProxySql读写分离
- 第三步keepalived高可用proxysql
- 第四步测试mysql读写分离高可用是否成功
第一步mariadb主从配置:
首先配置好mariadb主从同步,这里采用的一主两从,node1和node2为半同步复制,node1和node3为异步复制,也可以全部采用异步复制,根据业务需求配置即可
- 保证各服务器节点时间同步,可参考 时间同步设置方案 http://www.longma.tk/?p=629
- 初始化环境 node1和 node2 ,为保证不受其它实验干扰,建议恢复至初始状态,新装的mariadb即可,不要有其它实验项目干扰
主服务器node1配置:
1
2
3
4
5
6
7
8
9
10
11
|
配置文件
vim /etc/my.cnf.d/server.cnf
[mysqld]
skip_name_resolve=ON
innodb_file_per_table=ON
server_id=1
log_bin=mysql-bin
systemctl start mariadb.service
mysql> GRANT REPLICATION SLAVE,REPLICATION CLIENT ON *.* TO 'repluser'@'172.18.43.%' IDENTIFIED BY 'replpass';
mysql> FLUSH PRIVILEGES;
|
从服务器node2配置:
1
2
3
4
5
6
7
8
9
10
11
12
13
|
配置文件
vim /etc/my.cnf.d/server.cnf
[mysqld]
skip_name_resolve=ON
innodb_file_per_table=ON
server_id=2
relay_log=relay-log
systemctl start mariadb.service
mysql> CHANGE MASTER TO MASTER_HOST='172.18.43.8',MASTER_USER='repluser',MASTER_PASSWORD='replpass'
,MASTER_LOG_FILE='mysql-bin.000003',MASTER_LOG_POS=#;
#MASTER_LOG_POS=#; #去主节点show master status查看
mysql> START SLAVE; #启动IO和SQL两个线程
|
主服务器从服务器配置完成以后验证一下主从复制:
1
2
3
|
Node1: CREATE DATABASE mydb;
Node2: SHOW DATABASES; #确定数据库是否可以复制,此时主从复制完成
|
半同步配置:
主节点Node1配置
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
|
mysql> INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master.so';
mysql> SHOW PLUGINS; #确定安装完成
MariaDB [mydb]> SHOW GLOBAL VARIABLES LIKE 'rpl_semi%';
+------------------------------------+-------+
| Variable_name | Value |
+------------------------------------+-------+
| rpl_semi_sync_master_enabled | OFF |
| rpl_semi_sync_master_timeout | 10000 | #单位毫秒,等待从节点超时时长
| rpl_semi_sync_master_trace_level | 32 | #定义日志级别
| rpl_semi_sync_master_wait_no_slave | ON | # 没有从节点要不要等待
+------------------------------------+-------+
mysql> SET GLOBAL rpl_semi_sync_master_enabled=ON;
mysql> SHOW GLOBAL STATUS LIKE '%semi%'; #发现同步客户端还是0,需要重启从节点的IO_THREAD,默认是异步
+--------------------------------------------+-------+
| Variable_name | Value |
+--------------------------------------------+-------+
| Rpl_semi_sync_master_clients | 1 |
| Rpl_semi_sync_master_net_avg_wait_time | 987 |
| Rpl_semi_sync_master_net_wait_time | 987 |
| Rpl_semi_sync_master_net_waits | 1 |
|
从节点Node2配置:
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
|
mysql> INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave.so';
mysql> SHOW PLUGINS; #确定安装完成
mysql> SHOW GLOBAL VARIABLES LIKE 'rpl_semi%';
+---------------------------------+-------+
| Variable_name | Value |
+---------------------------------+-------+
| rpl_semi_sync_slave_enabled | OFF |
| rpl_semi_sync_slave_trace_level | 32 |
+---------------------------------+-------+
mysql> SET GLOBAL rpl_semi_sync_slave_enabled=ON;
mysql> STOP SLAVE IO_THREAD;
mysql> START SLAVE IO_THREAD;
|
测试半同步复制是否成功
1
2
3
4
5
|
mysql> use mydb;
mysql> CREATE TABLE tbl1 (id INT,name VARCHAR(100)); #主节点创建表
mysql> SHOW GLOBAL STATUS LIKE '%semi%'
mysql> SHOW TABLES; #然后去从节点查看 表是否生成
|
添加第二台从服务器
1
2
3
4
5
6
7
8
|
Node3:
vim /etc/my.cnf.d/server.cnf
[mysqld]
skip_name_resolve=ON
innodb_file_per_table=ON
server_id=3
relay_log=relay-log
|
备份主节点数据并恢复到node3
1
2
3
4
|
root@node1# mysqldump --all-databases --master-data=2 --routines --triggers --events --lock-all-tables >/root/all.sql
less /root/all.sql #记录了二进制日志该在哪里开始复制
mysql <all.sql #在node3导入all.sql数据库
|
在node3从服务器执行在哪个postion开始复制
1
2
3
4
5
6
7
8
|
mysql> CHANGE MASTER TO MASTER_HOST='172.18.43.8',MASTER_USER='repluser',MASTER_PASSWORD='replpass',MASTER_LOG_FILE='mysql-bin.000003',MASTER_LOG_POS=1069;
#注意默认从服务器是异步的,如果想做成半同步的,可以安装上面一个插件,具体操作可以看半同步复制,这里我们采用异步
mysql> START SLAVE;
mysql> SHOW SLAVE STATUS\G #查看IO及SQL线程是否启动了(\G 后不要加分号)
测试:
node1: CREATE DATABASE hidb;
查看其它节点是否都同步,如果都同步了下面我们做
MYSQL读写分离
|
第二步ProxySql读写分离
1.下载及安装proxysql,直接去 Proxysql官网 下载即可
2. yum install ./proxysql-1.3.6-1-centos7.x86_64.rpm 安装我们下载的proxysql
proxysql.cnf 配置
vim /etc/proxysql.cnf #将原先配置文件重命名,直接新建此文件添加以下配置
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
|
datadir="/var/lib/proxysql"
admin_variables=
{
admin_credentials="admin:admin"
mysql_ifaces="127.0.0.1:6032;/tmp/proxysql_admin.sock"
}
mysql_variables=
{
threads=4
max_connections=2048
default_query_delay=0
default_query_timeout=36000000
have_compress=true
poll_timeout=2000
interfaces="0.0.0.0:3306;/tmp/mysql.sock"
default_schema="information_schema"
stacksize=1048576
server_version="5.5.30"
connect_timeout_server=3000
monitor_history=600000
monitor_connect_interval=60000
monitor_ping_interval=10000
monitor_read_only_interval=1500
monitor_read_only_timeout=500
ping_interval_server=120000
ping_timeout_server=500
commands_stats=true
sessions_sort=true
connect_retries_on_failure=10
}
mysql_servers =
(
{
address = "172.18.43.8" # no default, required . If port is 0 , address is interpreted as a Unix Socket Domain
port = 3306 # no default, required . If port is 0 , address is interpreted as a Unix Socket Domain
hostgroup = 0 # no default, required
status = "ONLINE" # default: ONLINE
weight = 1 # default: 1
compression = 0 # default: 0
},
{
address = "172.18.43.88"
port = 3306
hostgroup = 1
status = "ONLINE" # default: ONLINE
weight = 1 # default: 1
compression = 0 # default: 0
},
{
address = "172.18.43.89"
port = 3306
hostgroup = 1
status = "ONLINE" # default: ONLINE
weight = 1 # default: 1
compression = 0 # default: 0
}
)
mysql_users:
(
{
username = "root"
password = "mageedu"
default_hostgroup = 0
max_connections=1000
default_schema="mydb"
active = 1
}
)
mysql_query_rules:
(
{
rule_id=1
active=1
match_pattern="^SELECT .* FOR UPDATE$"
destination_hostgroup=0
apply=1
}
)
scheduler=
(
)
mysql_replication_hostgroups=
(
{
writer_hostgroup=0
reader_hostgroup=1
}
)
|
主节点node1授权复制账号
1
2
3
4
5
6
7
8
|
mysql> GRANT ALL ON *.* TO 'root'@'172.18.43.%' IDENTIFIED BY 'mageedu'; #proxysql需要授权一个账号连接后端的mysql,因为我们在主节点授权,从节点的账号也就自动同步过去了
root@node1# service proxysql start #启动proxysql
ss -tnl #默认是4线程的
mysql -uadmin -h127.0.0.1 -padmin -P 6032 #登录proxysql本地管理接口
mysql> use main;
mysql> SELECT * FROM mysql_servers; #会看到我们的mysql复制集群
mysql> select * from mysql_users;
|
测试:
1
2
3
4
5
6
7
8
9
10
11
12
|
node1:
mysql -uroot -h172.18.43.200 -pmageedu
mysql> use mydb;
mysql> SHOW TABLES;
mysql> CREATE TABLE tbl3(name CHAR(20));
mysql> SHOW TABLES; #然后我们去后端三个mysql去查看一下,有则读写分离成功
root@n1#
mysql -uroot -h172.18.43.200 -pmageedu -e "INSERT INTO mydb.tbl3 VALUES ('tom'),('jerry')";
mysql> SELECT * FROM mydb.tbl3; #去三个节点看一下
|
配置第二台Proxysql
1
2
3
|
yum install ./proxysql-1.3.6-1-centos7.x86_64.rpm 安装我们下载的proxysql
scp /etc/proxysql.cnf root@n2:/etc/ #直接将第一台proxysql配置文件传给第二台 proxysql
|
测试第二台proxysql n2
1
2
3
4
5
6
|
mysql -uroot -h172.18.43.100 -pmageedu
mysql> use mydb;
mysql> SHOW TABLES;
mysql> CREATE TABLE tbl4(name CHAR(20));
mysql> SHOW TABLES; #然后我们去后端三个mysql去查看一下,有则读写分离成功
|
第三步keepalived高可用proxysql
n1和n2两个节点安装keepalived
1
2
|
yum install keepalived -y
|
keepalived.conf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
|
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from keepalive@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id node1
vrrp_mcast_group4 224.0.101.66
}
vrrp_instance myr {
state MASTER
interface eno16777736
virtual_router_id 67
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 250250
}
virtual_ipaddress {
172.18.43.43/16 dev eno16777736
}
}
|
1
2
3
4
5
6
|
node2
scp node1:/etc/keepalived/keepalived.conf root@node2:/etc/keepalived
state BACKUP #修改状态为备用
priority 98 #修改优先级小于master
|
测试:
- 先启动备用服务器systemctl start keepalived.service
1
2
3
4
5
6
7
8
9
10
|
Note: 此时备用服务器没有发现主节点,自动转为主节点,并配置vip
systemctl status keepalived.service 查看日志
ip addr list 查看ip是否添加vip
May 13 17:21:33 localhost.localdomain Keepalived_healthcheckers[1490]: Using LinkWatch kernel netlink reflec.....
May 13 17:21:36 localhost.localdomain Keepalived_vrrp[1491]: VRRP_Instance(myr) Transition to MASTER STATE
May 13 17:21:37 localhost.localdomain Keepalived_vrrp[1491]: VRRP_Instance(myr) Entering MASTER STATE
May 13 17:21:37 localhost.localdomain Keepalived_vrrp[1491]: VRRP_Instance(myr) setting protocol VIPs.
May 13 17:21:37 localhost.localdomain Keepalived_healthcheckers[1490]: Netlink reflector reports IP 172.16.0...e
|
- 启用主节点服务器:
1
2
3
4
5
6
7
|
systemctl start keepalived.service
Note:去备用节点查看状态systemctl status keepalived.service 发现状态已经被优先级高的抢为master
此时本机变为backup,而且从服务器已经删除vip
查看主节点状态信息systemctl status keepalived.service
ip addr list #查看ip是否漂移成功
|
第四步测试mysql读写分离高可用是否成功
1
2
3
4
5
6
7
|
mysql -uroot -h172.18.43.43 -pmageedu #访问VIP地址,确定可以正常连接
CREATE DATABASE hellodb; #去三个mariadb节点手动查看hellodb数据库是否创建成功
systemctl stop keepalived #停止n1的keepalived,测试VIP漂移到n2后proxysql是否还可以正常访问
mysql -uroot -h172.18.43.43 -pmageedu
use hellodb;
CREATE TABLE tbl6 (name VARCHAR(20)); #去我们的三个mariadb查看表是否创建成功
|