--------------------------------------------------------------------------------------------------------------------------------------------
CentOS7 配置如下
CentOS7 配置如下
5.7.22 Group Replication
MySQL5.7.22
安裝略
MySQL5.7.22
安裝略
在三台db服務器上面設置/etc/hosts映射,如下:
192.168.1.101 mydb1
192.168.1.102 mydb2
192.168.1.103 mydb3
192.168.1.101 mydb1
192.168.1.102 mydb2
192.168.1.103 mydb3
安裝的數據庫服務器:
數據庫服務器地址 端口 數據目錄 Server-id
192.168.1.101(mydb1) 3306 /app/mysqldata/3306/data/ 1013306
192.168.1.102(mydb2) 3306 /app/mysqldata/3306/data/ 1023306
192.168.1.103(mydb3) 3306 /app/mysqldata/3306/data/ 1033306
數據庫服務器地址 端口 數據目錄 Server-id
192.168.1.101(mydb1) 3306 /app/mysqldata/3306/data/ 1013306
192.168.1.102(mydb2) 3306 /app/mysqldata/3306/data/ 1023306
192.168.1.103(mydb3) 3306 /app/mysqldata/3306/data/ 1033306
在3台mysql實例上mydb1、mydb2、mydb3分配賬號:
mysql> set sql_log_bin=0;
mysql> GRANT REPLICATION SLAVE ON *.* TO 'repl'@'192.168.%' IDENTIFIED BY 'repl';
mysql> set sql_log_bin=1;
mysql> set sql_log_bin=0;
mysql> GRANT REPLICATION SLAVE ON *.* TO 'repl'@'192.168.%' IDENTIFIED BY 'repl';
mysql> set sql_log_bin=1;
在3台my.cnf上面配置gtid:
[mysqld]
gtid_mode=ON
log-slave-updates=ON
enforce-gtid-consistency=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
binlog_checksum=none
#group replication config
plugin-load = group_replication.so
transaction_write_set_extraction = XXHASH64
group_replication_start_on_boot = OFF
group_replication_bootstrap_group = OFF
group_replication_group_name = '__GROUP_UUID__'
group_replication_local_address = 'mydb1:6606'
group_replication_group_seeds = 'mydb1:6606,mydb2:6606,mydb3:6606'
group_replication_single_primary_mode = true
group_replication_enforce_update_everywhere_checks = false
[mysqld]
gtid_mode=ON
log-slave-updates=ON
enforce-gtid-consistency=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
binlog_checksum=none
#group replication config
plugin-load = group_replication.so
transaction_write_set_extraction = XXHASH64
group_replication_start_on_boot = OFF
group_replication_bootstrap_group = OFF
group_replication_group_name = '__GROUP_UUID__'
group_replication_local_address = 'mydb1:6606'
group_replication_group_seeds = 'mydb1:6606,mydb2:6606,mydb3:6606'
group_replication_single_primary_mode = true
group_replication_enforce_update_everywhere_checks = false
[mysqld]
gtid_mode=ON
log-slave-updates=ON
enforce-gtid-consistency=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
binlog_checksum=none
#group replication config
plugin-load = group_replication.so
transaction_write_set_extraction = XXHASH64
group_replication_start_on_boot = OFF
group_replication_bootstrap_group = OFF
group_replication_group_name = '__GROUP_UUID__'
group_replication_local_address = 'mydb2:6606'
group_replication_group_seeds = 'mydb1:6606,mydb2:6606,mydb3:6606'
group_replication_single_primary_mode = true
group_replication_enforce_update_everywhere_checks = false
[mysqld]
gtid_mode=ON
log-slave-updates=ON
enforce-gtid-consistency=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
binlog_checksum=none
#group replication config
plugin-load = group_replication.so
transaction_write_set_extraction = XXHASH64
group_replication_start_on_boot = OFF
group_replication_bootstrap_group = OFF
group_replication_group_name = '__GROUP_UUID__'
group_replication_local_address = 'mydb2:6606'
group_replication_group_seeds = 'mydb1:6606,mydb2:6606,mydb3:6606'
group_replication_single_primary_mode = true
group_replication_enforce_update_everywhere_checks = false
[mysqld]
gtid_mode=ON
log-slave-updates=ON
enforce-gtid-consistency=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
binlog_checksum=none
#group replication config
plugin-load = group_replication.so
transaction_write_set_extraction = XXHASH64
group_replication_start_on_boot = OFF
group_replication_bootstrap_group = OFF
group_replication_group_name = '__GROUP_UUID__'
group_replication_local_address = 'mydb3:6606'
group_replication_group_seeds = 'mydb1:6606,mydb2:6606,mydb3:6606'
group_replication_single_primary_mode = true
group_replication_enforce_update_everywhere_checks = false
--------------
group_replication_allow_local_disjoint_gtids_join=off
group_replication_member_weight=80
group_replication_unreachable_majority_timeout=5
group_replication_compression_threshold=131072
group_replication_transaction_size_limit=20971520
[mysqld]
gtid_mode=ON
log-slave-updates=ON
enforce-gtid-consistency=ON
master_info_repository=TABLE
relay_log_info_repository=TABLE
binlog_checksum=none
#group replication config
plugin-load = group_replication.so
transaction_write_set_extraction = XXHASH64
group_replication_start_on_boot = OFF
group_replication_bootstrap_group = OFF
group_replication_group_name = '__GROUP_UUID__'
group_replication_local_address = 'mydb3:6606'
group_replication_group_seeds = 'mydb1:6606,mydb2:6606,mydb3:6606'
group_replication_single_primary_mode = true
group_replication_enforce_update_everywhere_checks = false
--------------
group_replication_allow_local_disjoint_gtids_join=off
group_replication_member_weight=80
group_replication_unreachable_majority_timeout=5
group_replication_compression_threshold=131072
group_replication_transaction_size_limit=20971520
重啟3台mysql服務
mysql> show plugins;
+----------------------------+----------+--------------------+----------------------+---------+
| Name | Status | Type | Library | License |
+----------------------------+----------+--------------------+----------------------+---------+
| binlog | ACTIVE | STORAGE ENGINE | NULL | GPL |
…………
| group_replication | ACTIVE | GROUP REPLICATION | group_replication.so | GPL |
+----------------------------+----------+--------------------+----------------------+---------+
45 rows in set (0.00 sec)
+----------------------------+----------+--------------------+----------------------+---------+
| Name | Status | Type | Library | License |
+----------------------------+----------+--------------------+----------------------+---------+
| binlog | ACTIVE | STORAGE ENGINE | NULL | GPL |
…………
| group_replication | ACTIVE | GROUP REPLICATION | group_replication.so | GPL |
+----------------------------+----------+--------------------+----------------------+---------+
45 rows in set (0.00 sec)
開始構建集群,在mydb1(master)上執行:
# 構建集群
CHANGE MASTER TO MASTER_USER='repl', MASTER_PASSWORD='repl' FOR CHANNEL 'group_replication_recovery';
#開啟group_replication
SET GLOBAL group_replication_bootstrap_group=ON;
START GROUP_REPLICATION;
SET GLOBAL group_replication_bootstrap_group=OFF;
mysql> select * from performance_schema.replication_group_members;
mysql> select * from performance_schema.replication_group_member_stats\G
mysql> select * from performance_schema.replication_connection_status;
mysql> select * from performance_schema.replication_applier_status;
mydb2、mydb3上加入
CHANGE MASTER TO MASTER_USER='repl', MASTER_PASSWORD='repl' FOR CHANNEL 'group_replication_recovery';
START GROUP_REPLICATION;
CHANGE MASTER TO MASTER_USER='repl', MASTER_PASSWORD='repl' FOR CHANNEL 'group_replication_recovery';
START GROUP_REPLICATION;
驗證集群復制功能
create database test;
use test;
create table t(id int primary key auto_increment,name varchar(20));
insert into t(name) values ('allen');
insert into t(name) values ('bob');
select * from t;
create database test;
use test;
create table t(id int primary key auto_increment,name varchar(20));
insert into t(name) values ('allen');
insert into t(name) values ('bob');
select * from t;
默認為單主模式,如果要使用多主模式,則需要在加入組前將參數設置為set global group_replication_single_primary_mode=off;
基於主鍵的並行執行
set global slave_parallel_type='LOGICAL_CLOCK';
set global slave_parallel_workers=N;
set global slave_preserve_commit_order=ON;
Group Replication在開啟並行復制時,要求必須要設置slave_preserve_commit_order的值為ON
打開這個參數可以保證Applier上執行事務的提交順序和源MySQL服務器上的提交順序相同
set global slave_parallel_type='LOGICAL_CLOCK';
set global slave_parallel_workers=N;
set global slave_preserve_commit_order=ON;
Group Replication在開啟並行復制時,要求必須要設置slave_preserve_commit_order的值為ON
打開這個參數可以保證Applier上執行事務的提交順序和源MySQL服務器上的提交順序相同
強制移除故障成員,只需要在列表中的任意一個成員上設置即可
set global group_replication_force_members = 'mydb1:6606,mydb2:6606';
set global group_replication_force_members = 'mydb1:6606,mydb2:6606';
Group Replication的監控
Group Replication的狀態信息被存儲到了以下五個performance_schema表中,可以很方便地進行SQL語句查詢:
replication_group_members
replication_group_member_stats
replication_connection_status
replication_applier_status
threads
Group Replication的狀態信息被存儲到了以下五個performance_schema表中,可以很方便地進行SQL語句查詢:
replication_group_members
replication_group_member_stats
replication_connection_status
replication_applier_status
threads
查看主節點是哪個
show global status like "group_replication_primary_member";
select * from performance_schema.replication_group_members where member_id =(select variable_value from performance_schema.global_status WHERE VARIABLE_NAME= 'group_replication_primary_member');
show global status like "group_replication_primary_member";
select * from performance_schema.replication_group_members where member_id =(select variable_value from performance_schema.global_status WHERE VARIABLE_NAME= 'group_replication_primary_member');
本節點執行的GTID
select @@global.gtid_executed\G
獲取的GTID
SELECT Received_transaction_set FROM performance_schema.replication_connection_status WHERE Channel_name = 'group_replication_applier';
select @@global.gtid_executed\G
獲取的GTID
SELECT Received_transaction_set FROM performance_schema.replication_connection_status WHERE Channel_name = 'group_replication_applier';
Group Replication是MySQL復制今后的發展方向
Group Replication創建了一個獨立的TCP端口進行通信,各個MySQL服務器上的Group Replication插件通過這個端口連接在一起,兩兩之間可以直接通信。Binlog Event的傳輸並不像異步復制那樣是簡單的點到點之間的傳輸。Group Replication在傳輸數據時,使用了Paxos協議。Paxos協議保證了數據傳輸的一致性和原子性。Group Replication基於Paxos協議構建了一個分布式的狀態復制機制,這是實現多主復制的核心技術。Group Replication中不會出現腦裂的現象。Group Replication的冗余能力很好,能夠保證Binlog Event至少被復制到超過一半的成員上
單主模式:組內的成員自動選舉出主成員。初始化時,被初始化的成員自動選舉為主成員,其他加入組的成員自動成為從成員。當主成員發生故障或離開組時,會從組內的其他成員中選出一個新的主成員。選取主成員的方法很簡單,首先對所有在線成員的UUID進行排序,選取UUID最小的成員作為主成員
讀寫模式的自動切換
Set global super_read_only=1;
Set global super_read_only=0;
多主模式:在啟動Group Replication插件時,它會檢測用戶是否配置了MySQL的自增變量。如果用戶沒有配置這兩個變量(auto_increment_offset和auto_increment_increment都為1),則會自動將group_replication_auto_increment_increment和server-id的值設置到MySQL的auto_increment_increment和auto_increment_offset全局變量中
注意使用mysqldump進行導入的時候gtid_purged
測試1:MGR主節點宕機
測試2:MGR一個從節點宕機
測試3:過半從節點同時宕機
測試4:主節點連續宕機
測試5:主節點網絡抖動測試
測試6:從節點網絡抖動測試
測試7:主從節點網絡分區
測試8:主從節點網絡延遲
測試9:主節點KILL大事務
測試10:對MyISAM表的處理
測試11:MGR對外鍵的支持-單主模式
測試12:mysqldump對MGR的支持
測試13:無主鍵表的支持
測試14:gh-ost對MGR的支持
測試15:pt-osc對MGR的支持
測試16:xtrabackup對MGR的支持
測試17:binlog_format=statement的影響
--------------------------------------------------------------------------------------------------------------------------------------------
Consul部分
consul_1.2.3_linux_amd64.zip
192.168.1.121 consul server
192.168.1.101 consul client mgr
192.168.1.102 consul client mgr
192.168.1.103 consul client mgr
1、新建目錄(server和client都要建立)
[root@kettle1 ~]# mkdir /etc/consul.d/ //存放consul配置文件
[root@kettle1 ~]# mkdir -p /data/consul //數據目錄
[root@kettle1 ~]# mkdir -p /data/consul/shell //存放檢查腳本
[root@mydb1 ~]# mkdir /etc/consul.d/ //存放consul配置文件
[root@mydb1 ~]# mkdir -p /data/consul/ //數據目錄
[root@mydb1 ~]# mkdir -p /data/consul/shell/ //存放檢查腳本
[root@mydb2 ~]# mkdir /etc/consul.d/ //存放consul配置文件
[root@mydb2 ~]# mkdir -p /data/consul/ //數據目錄
[root@mydb2 ~]# mkdir -p /data/consul/shell/ //存放檢查腳本
[root@mydb3 ~]# mkdir /etc/consul.d/ //存放consul配置文件
[root@mydb3 ~]# mkdir -p /data/consul/ //數據目錄
[root@mydb3 ~]# mkdir -p /data/consul/shell/ //存放檢查腳本
[root@kettle1 ~]# mkdir /etc/consul.d/ //存放consul配置文件
[root@kettle1 ~]# mkdir -p /data/consul //數據目錄
[root@kettle1 ~]# mkdir -p /data/consul/shell //存放檢查腳本
[root@mydb1 ~]# mkdir /etc/consul.d/ //存放consul配置文件
[root@mydb1 ~]# mkdir -p /data/consul/ //數據目錄
[root@mydb1 ~]# mkdir -p /data/consul/shell/ //存放檢查腳本
[root@mydb2 ~]# mkdir /etc/consul.d/ //存放consul配置文件
[root@mydb2 ~]# mkdir -p /data/consul/ //數據目錄
[root@mydb2 ~]# mkdir -p /data/consul/shell/ //存放檢查腳本
[root@mydb3 ~]# mkdir /etc/consul.d/ //存放consul配置文件
[root@mydb3 ~]# mkdir -p /data/consul/ //數據目錄
[root@mydb3 ~]# mkdir -p /data/consul/shell/ //存放檢查腳本
2、新建server.json,將相關配置寫入(兩種方法:一種寫入配置文件,啟動時指定;另一種啟動時候寫成命令行的形式)
[root@kettle1 ~]# cat /etc/consul.d/server.json
{
"data_dir": "/data/consul",
"datacenter": "dc1",
"log_level": "INFO", //打印日志級別
"server": true, //是否是consul server
"node_name": "wsh", //集群node,集群中必須唯一,默認為主機名
"bootstrap_expect": 1, //通知consul我們准備加入的server節點個數,該參數是為了延遲日志復制的啟動直到我們指定數量的server節點成功加入后啟動
"bind_addr": "192.168.1.121", //指明節點的ip地址,用於集群之間的交流
"client_addr": "192.168.1.121", //client綁定的接口,默認是127.0.0.1
"ui":true
}
配置的時候將注釋去掉
[root@kettle1 ~]# cat /etc/consul.d/server.json
{
"data_dir": "/data/consul",
"datacenter": "dc1",
"log_level": "INFO", //打印日志級別
"server": true, //是否是consul server
"node_name": "wsh", //集群node,集群中必須唯一,默認為主機名
"bootstrap_expect": 1, //通知consul我們准備加入的server節點個數,該參數是為了延遲日志復制的啟動直到我們指定數量的server節點成功加入后啟動
"bind_addr": "192.168.1.121", //指明節點的ip地址,用於集群之間的交流
"client_addr": "192.168.1.121", //client綁定的接口,默認是127.0.0.1
"ui":true
}
配置的時候將注釋去掉
3、啟動server
[root@kettle1 ~]# consul agent -config-dir=/etc/consul.d -enable-script-checks > /data/consul/consul.log &
[root@kettle1 soft]# consul members -http-addr='192.168.1.121:8500'
[root@kettle1 ~]# consul agent -config-dir=/etc/consul.d -enable-script-checks > /data/consul/consul.log &
[root@kettle1 soft]# consul members -http-addr='192.168.1.121:8500'
4、配置client:除了自身的client.json,還加入了檢查MGR的腳本--192.168.1.101/102/103
[root@mydb1 ~]# cat /etc/consul.d/client.json
{
"data_dir": "/data/consul",
"enable_script_checks": true,
"bind_addr": "192.168.1.101",
"retry_join": ["192.168.1.121"],
"retry_interval": "30s",
"rejoin_after_leave": true,
"start_join": ["192.168.1.121"] ,
"node_name": "mgr_client1"
}
[root@mydb2 ~]# cat /etc/consul.d/client.json
{
"data_dir": "/data/consul",
"enable_script_checks": true,
"bind_addr": "192.168.1.102",
"retry_join": ["192.168.1.121"],
"retry_interval": "30s",
"rejoin_after_leave": true,
"start_join": ["192.168.1.121"] ,
"node_name": "mgr_client2"
}
[root@mydb3 ~]# cat /etc/consul.d/client.json
{
"data_dir": "/data/consul",
"enable_script_checks": true,
"bind_addr": "192.168.1.103",
"retry_join": ["192.168.1.121"],
"retry_interval": "30s",
"rejoin_after_leave": true,
"start_join": ["192.168.1.121"] ,
"node_name": "mgr_client3"
}
[root@mydb1 ~]# cat /etc/consul.d/r-test-mgr-ser.json
{
"service": {
"name": "r-test-3306-mydb-ser",
"tags": ["測試-3306"],
"address": "192.168.1.101",
"meta": {
"meta": "for my service"
},
"port": 3306,
"enable_tag_override": false,
"checks": [
{
"args": ["/data/consul/shell/check_mysql_mgr_slave.sh"],
"interval": "1s"
}
]
}
}
[root@mydb1 ~]# cat /etc/consul.d/w-test-mgr-ser.json
{
"service": {
"name": "w-test-3306-mydb-ser",
"tags": ["測試-3306"],
"address": "192.168.1.101",
"meta": {
"meta": "for my service"
},
"port": 3306,
"enable_tag_override": false,
"checks": [
{
"args": ["/data/consul/shell/check_mysql_mgr_master.sh"],
"interval": "10s"
}
]
}
}
注意在mydb2,mydb3上調整ip
檢測腳本如下
[root@mydb1 ~]# cat /data/consul/shell/check_mysql_mgr_master.sh
#!/bin/bash
# Consul health check: register the WRITE (primary) role for this MGR node.
# Exit 0 -> this instance is the group primary; exit 2 -> down / not ONLINE / not primary.
host="192.168.1.101"
port=3306
user="dba_user"
password="msds007"
# NOTE(review): the password on the command line is visible in `ps`; consider
# mysql_config_editor login-path or a restricted ~/.my.cnf instead.
comm="/usr/local/mysql/bin/mysql -u$user -h$host -P $port -p$password"
value=`$comm -Nse "select 1"`
primary_member=`$comm -Nse "select variable_value from performance_schema.global_status WHERE VARIABLE_NAME= 'group_replication_primary_member'"`
server_uuid=`$comm -Nse "select variable_value from performance_schema.global_variables where VARIABLE_NAME='server_uuid';"`
# Is mysqld alive? Quote "$value": with the unquoted form an empty result
# degenerates the test expression instead of testing emptiness.
if [ -z "$value" ]
then
    echo "mysql $port is down....."
    exit 2
fi
# Is this member ONLINE in the group? Quoting "$node_state" is required:
# when MySQL returns nothing, the unquoted test errors out ("unary operator
# expected") instead of failing the check cleanly.
node_state=`$comm -Nse "select MEMBER_STATE from performance_schema.replication_group_members where MEMBER_ID='$server_uuid'"`
if [ "$node_state" != "ONLINE" ]
then
    echo "MySQL $port state is not online...."
    exit 2
fi
# Primary check: our server_uuid equals the elected primary member's uuid.
if [[ "$server_uuid" == "$primary_member" ]]
then
    echo "MySQL $port Instance is master ........"
    exit 0
else
    echo "MySQL $port Instance is slave ........"
    exit 2
fi
[root@mydb1 ~]# cat /data/consul/shell/check_mysql_mgr_slave.sh
#!/bin/bash
# Consul health check: register the READ (slave) role for this MGR node.
# Exit 0 -> serve the read service; exit 2 -> down / not ONLINE / is primary
# (unless it is the last remaining member, which then also serves reads).
host="192.168.1.101"
port=3306
user="dba_user"
password="msds007"
# NOTE(review): the password on the command line is visible in `ps`; consider
# mysql_config_editor login-path or a restricted ~/.my.cnf instead.
comm="/usr/local/mysql/bin/mysql -u$user -h$host -P $port -p$password"
value=`$comm -Nse "select 1"`
primary_member=`$comm -Nse "select variable_value from performance_schema.global_status WHERE VARIABLE_NAME= 'group_replication_primary_member'"`
server_uuid=`$comm -Nse "select variable_value from performance_schema.global_variables where VARIABLE_NAME='server_uuid';"`
# Is mysqld alive? Quote "$value" so an empty result is tested correctly.
if [ -z "$value" ]
then
    echo "mysql $port is down....."
    exit 2
fi
# Is this member ONLINE in the group? Quoting "$node_state" prevents a test
# syntax error when the query returns an empty string.
node_state=`$comm -Nse "select MEMBER_STATE from performance_schema.replication_group_members where MEMBER_ID='$server_uuid'"`
if [ "$node_state" != "ONLINE" ]
then
    echo "MySQL $port state is not online...."
    exit 2
fi
# Not the primary -> this node serves reads.
if [[ "$server_uuid" != "$primary_member" ]]
then
    echo "MySQL $port Instance is slave ........"
    exit 0
else
    node_num=`$comm -Nse "select count(*) from performance_schema.replication_group_members"`
    # If the primary is the only member left, register it for the read
    # role as well so the read service never goes fully dark.
    if [ "$node_num" -eq 1 ]
    then
        echo "MySQL $port Instance is slave ........"
        exit 0
    else
        echo "MySQL $port Instance is master ........"
        exit 2
    fi
fi
[root@mydb1 ~]# cat /etc/consul.d/client.json
{
"data_dir": "/data/consul",
"enable_script_checks": true,
"bind_addr": "192.168.1.101",
"retry_join": ["192.168.1.121"],
"retry_interval": "30s",
"rejoin_after_leave": true,
"start_join": ["192.168.1.121"] ,
"node_name": "mgr_client1"
}
[root@mydb2 ~]# cat /etc/consul.d/client.json
{
"data_dir": "/data/consul",
"enable_script_checks": true,
"bind_addr": "192.168.1.102",
"retry_join": ["192.168.1.121"],
"retry_interval": "30s",
"rejoin_after_leave": true,
"start_join": ["192.168.1.121"] ,
"node_name": "mgr_client2"
}
[root@mydb3 ~]# cat /etc/consul.d/client.json
{
"data_dir": "/data/consul",
"enable_script_checks": true,
"bind_addr": "192.168.1.103",
"retry_join": ["192.168.1.121"],
"retry_interval": "30s",
"rejoin_after_leave": true,
"start_join": ["192.168.1.121"] ,
"node_name": "mgr_client3"
}
[root@mydb1 ~]# cat /etc/consul.d/r-test-mgr-ser.json
{
"service": {
"name": "r-test-3306-mydb-ser",
"tags": ["測試-3306"],
"address": "192.168.1.101",
"meta": {
"meta": "for my service"
},
"port": 3306,
"enable_tag_override": false,
"checks": [
{
"args": ["/data/consul/shell/check_mysql_mgr_slave.sh"],
"interval": "1s"
}
]
}
}
[root@mydb1 ~]# cat /etc/consul.d/w-test-mgr-ser.json
{
"service": {
"name": "w-test-3306-mydb-ser",
"tags": ["測試-3306"],
"address": "192.168.1.101",
"meta": {
"meta": "for my service"
},
"port": 3306,
"enable_tag_override": false,
"checks": [
{
"args": ["/data/consul/shell/check_mysql_mgr_master.sh"],
"interval": "10s"
}
]
}
}
注意在mydb2,mydb3上調整ip
檢測腳本如下
[root@mydb1 ~]# cat /data/consul/shell/check_mysql_mgr_master.sh
#!/bin/bash
# Consul health check: register the WRITE (primary) role for this MGR node.
# Exit 0 -> this instance is the group primary; exit 2 -> down / not ONLINE / not primary.
host="192.168.1.101"
port=3306
user="dba_user"
password="msds007"
# NOTE(review): the password on the command line is visible in `ps`; consider
# mysql_config_editor login-path or a restricted ~/.my.cnf instead.
comm="/usr/local/mysql/bin/mysql -u$user -h$host -P $port -p$password"
value=`$comm -Nse "select 1"`
primary_member=`$comm -Nse "select variable_value from performance_schema.global_status WHERE VARIABLE_NAME= 'group_replication_primary_member'"`
server_uuid=`$comm -Nse "select variable_value from performance_schema.global_variables where VARIABLE_NAME='server_uuid';"`
# Is mysqld alive? Quote "$value": with the unquoted form an empty result
# degenerates the test expression instead of testing emptiness.
if [ -z "$value" ]
then
    echo "mysql $port is down....."
    exit 2
fi
# Is this member ONLINE in the group? Quoting "$node_state" is required:
# when MySQL returns nothing, the unquoted test errors out ("unary operator
# expected") instead of failing the check cleanly.
node_state=`$comm -Nse "select MEMBER_STATE from performance_schema.replication_group_members where MEMBER_ID='$server_uuid'"`
if [ "$node_state" != "ONLINE" ]
then
    echo "MySQL $port state is not online...."
    exit 2
fi
# Primary check: our server_uuid equals the elected primary member's uuid.
if [[ "$server_uuid" == "$primary_member" ]]
then
    echo "MySQL $port Instance is master ........"
    exit 0
else
    echo "MySQL $port Instance is slave ........"
    exit 2
fi
[root@mydb1 ~]# cat /data/consul/shell/check_mysql_mgr_slave.sh
#!/bin/bash
# Consul health check: register the READ (slave) role for this MGR node.
# Exit 0 -> serve the read service; exit 2 -> down / not ONLINE / is primary
# (unless it is the last remaining member, which then also serves reads).
host="192.168.1.101"
port=3306
user="dba_user"
password="msds007"
# NOTE(review): the password on the command line is visible in `ps`; consider
# mysql_config_editor login-path or a restricted ~/.my.cnf instead.
comm="/usr/local/mysql/bin/mysql -u$user -h$host -P $port -p$password"
value=`$comm -Nse "select 1"`
primary_member=`$comm -Nse "select variable_value from performance_schema.global_status WHERE VARIABLE_NAME= 'group_replication_primary_member'"`
server_uuid=`$comm -Nse "select variable_value from performance_schema.global_variables where VARIABLE_NAME='server_uuid';"`
# Is mysqld alive? Quote "$value" so an empty result is tested correctly.
if [ -z "$value" ]
then
    echo "mysql $port is down....."
    exit 2
fi
# Is this member ONLINE in the group? Quoting "$node_state" prevents a test
# syntax error when the query returns an empty string.
node_state=`$comm -Nse "select MEMBER_STATE from performance_schema.replication_group_members where MEMBER_ID='$server_uuid'"`
if [ "$node_state" != "ONLINE" ]
then
    echo "MySQL $port state is not online...."
    exit 2
fi
# Not the primary -> this node serves reads.
if [[ "$server_uuid" != "$primary_member" ]]
then
    echo "MySQL $port Instance is slave ........"
    exit 0
else
    node_num=`$comm -Nse "select count(*) from performance_schema.replication_group_members"`
    # If the primary is the only member left, register it for the read
    # role as well so the read service never goes fully dark.
    if [ "$node_num" -eq 1 ]
    then
        echo "MySQL $port Instance is slave ........"
        exit 0
    else
        echo "MySQL $port Instance is master ........"
        exit 2
    fi
fi
5、啟動3台client
[root@mydb1 ~]# consul agent -config-dir=/etc/consul.d -enable-script-checks > /data/consul/consul.log &
[root@mydb2 ~]# consul agent -config-dir=/etc/consul.d -enable-script-checks > /data/consul/consul.log &
[root@mydb3 ~]# consul agent -config-dir=/etc/consul.d -enable-script-checks > /data/consul/consul.log &
[root@mydb1 ~]# consul agent -config-dir=/etc/consul.d -enable-script-checks > /data/consul/consul.log &
[root@mydb2 ~]# consul agent -config-dir=/etc/consul.d -enable-script-checks > /data/consul/consul.log &
[root@mydb3 ~]# consul agent -config-dir=/etc/consul.d -enable-script-checks > /data/consul/consul.log &
6、查看集群狀態
[root@kettle1 ~]# consul members -http-addr='192.168.1.121:8500'
Node Address Status Type Build Protocol DC Segment
wsh 192.168.1.121:8301 alive server 1.2.3 2 dc1 <all>
mgr_client1 192.168.1.101:8301 alive client 1.2.3 2 dc1 <default>
mgr_client2 192.168.1.102:8301 alive client 1.2.3 2 dc1 <default>
mgr_client3 192.168.1.103:8301 alive client 1.2.3 2 dc1 <default>
[root@mydb1 ~]# consul members
Node Address Status Type Build Protocol DC Segment
wsh 192.168.1.121:8301 alive server 1.2.3 2 dc1 <all>
mgr_client1 192.168.1.101:8301 alive client 1.2.3 2 dc1 <default>
mgr_client2 192.168.1.102:8301 alive client 1.2.3 2 dc1 <default>
mgr_client3 192.168.1.103:8301 alive client 1.2.3 2 dc1 <default>
[root@kettle1 ~]# consul members -http-addr='192.168.1.121:8500'
Node Address Status Type Build Protocol DC Segment
wsh 192.168.1.121:8301 alive server 1.2.3 2 dc1 <all>
mgr_client1 192.168.1.101:8301 alive client 1.2.3 2 dc1 <default>
mgr_client2 192.168.1.102:8301 alive client 1.2.3 2 dc1 <default>
mgr_client3 192.168.1.103:8301 alive client 1.2.3 2 dc1 <default>
[root@mydb1 ~]# consul members
Node Address Status Type Build Protocol DC Segment
wsh 192.168.1.121:8301 alive server 1.2.3 2 dc1 <all>
mgr_client1 192.168.1.101:8301 alive client 1.2.3 2 dc1 <default>
mgr_client2 192.168.1.102:8301 alive client 1.2.3 2 dc1 <default>
mgr_client3 192.168.1.103:8301 alive client 1.2.3 2 dc1 <default>
[root@mydb1 ~]# dig @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; <<>> DiG 9.9.4-RedHat-9.9.4-50.el7 <<>> @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 17258
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 2
;; WARNING: recursion requested but not available
; <<>> DiG 9.9.4-RedHat-9.9.4-50.el7 <<>> @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 17258
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 2
;; WARNING: recursion requested but not available
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;w-test-3306-mydb-ser.service.consul. IN A
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;w-test-3306-mydb-ser.service.consul. IN A
;; ANSWER SECTION:
w-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.101
w-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.101
;; ADDITIONAL SECTION:
w-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
w-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
;; Query time: 0 msec
;; SERVER: 192.168.1.121#8600(192.168.1.121)
;; WHEN: 三 10月 17 01:57:18 CST 2018
;; MSG SIZE rcvd: 116
;; SERVER: 192.168.1.121#8600(192.168.1.121)
;; WHEN: 三 10月 17 01:57:18 CST 2018
;; MSG SIZE rcvd: 116
[root@mydb1 ~]# dig @192.168.1.121 -p 8600 r-test-3306-mydb-ser.service.consul
; <<>> DiG 9.9.4-RedHat-9.9.4-50.el7 <<>> @192.168.1.121 -p 8600 r-test-3306-mydb-ser.service.consul
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 54349
;; flags: qr aa rd; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 3
;; WARNING: recursion requested but not available
; <<>> DiG 9.9.4-RedHat-9.9.4-50.el7 <<>> @192.168.1.121 -p 8600 r-test-3306-mydb-ser.service.consul
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 54349
;; flags: qr aa rd; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 3
;; WARNING: recursion requested but not available
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;r-test-3306-mydb-ser.service.consul. IN A
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;r-test-3306-mydb-ser.service.consul. IN A
;; ANSWER SECTION:
r-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.102
r-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.103
r-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.102
r-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.103
;; ADDITIONAL SECTION:
r-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
r-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
r-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
r-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
;; Query time: 1 msec
;; SERVER: 192.168.1.121#8600(192.168.1.121)
;; WHEN: 三 10月 17 01:58:45 CST 2018
;; MSG SIZE rcvd: 168
;; SERVER: 192.168.1.121#8600(192.168.1.121)
;; WHEN: 三 10月 17 01:58:45 CST 2018
;; MSG SIZE rcvd: 168
7、模擬故障
停掉寫節點192.168.1.101的MySQL服務,查看狀態:
[root@mydb1 ~]# dig @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; <<>> DiG 9.9.4-RedHat-9.9.4-50.el7 <<>> @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 49700
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 2
;; WARNING: recursion requested but not available
停掉寫節點192.168.1.101的MySQL服務,查看狀態:
[root@mydb1 ~]# dig @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; <<>> DiG 9.9.4-RedHat-9.9.4-50.el7 <<>> @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 49700
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 2
;; WARNING: recursion requested but not available
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;w-test-3306-mydb-ser.service.consul. IN A
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;w-test-3306-mydb-ser.service.consul. IN A
;; ANSWER SECTION:
w-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.102
w-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.102
;; ADDITIONAL SECTION:
w-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
w-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
;; Query time: 0 msec
;; SERVER: 192.168.1.121#8600(192.168.1.121)
;; WHEN: 三 10月 17 02:04:02 CST 2018
;; MSG SIZE rcvd: 116
;; SERVER: 192.168.1.121#8600(192.168.1.121)
;; WHEN: 三 10月 17 02:04:02 CST 2018
;; MSG SIZE rcvd: 116
再停掉192.168.1.102的MySQL服務,再次查看狀態:讀寫節點全都壓在了同一台機器
[root@mydb1 ~]# dig @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; <<>> DiG 9.9.4-RedHat-9.9.4-50.el7 <<>> @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 48398
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 2
;; WARNING: recursion requested but not available
[root@mydb1 ~]# dig @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; <<>> DiG 9.9.4-RedHat-9.9.4-50.el7 <<>> @192.168.1.121 -p 8600 w-test-3306-mydb-ser.service.consul
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 48398
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 2
;; WARNING: recursion requested but not available
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;w-test-3306-mydb-ser.service.consul. IN A
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;w-test-3306-mydb-ser.service.consul. IN A
;; ANSWER SECTION:
w-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.103
w-test-3306-mydb-ser.service.consul. 0 IN A 192.168.1.103
;; ADDITIONAL SECTION:
w-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
w-test-3306-mydb-ser.service.consul. 0 IN TXT "consul-network-segment="
;; Query time: 0 msec
;; SERVER: 192.168.1.121#8600(192.168.1.121)
;; WHEN: 三 10月 17 02:06:22 CST 2018
;; MSG SIZE rcvd: 116
;; SERVER: 192.168.1.121#8600(192.168.1.121)
;; WHEN: 三 10月 17 02:06:22 CST 2018
;; MSG SIZE rcvd: 116
ping w-test-3306-mydb-ser.service.consul
ping r-test-3306-mydb-ser.service.consul
ping r-test-3306-mydb-ser.service.consul
--------------------------------------------------------------------------------------------------------------------------------------------
ProxySQL部分
安裝依賴包
# yum install -y perl-DBI perl-DBD-MySQL perl-Time-HiRes perl-IO-Socket-SSL
# vim /etc/yum.repos.d/proxysql.repo
[proxysql_repo]
name= ProxySQL YUM repository
baseurl=http://repo.proxysql.com/ProxySQL/proxysql-1.4.x/centos/$releasever
gpgcheck=1
gpgkey=http://repo.proxysql.com/ProxySQL/repo_pub_key
# yum install -y proxysql
查看proxysql涉及到哪些文件
# rpm -ql proxysql
/etc/init.d/proxysql
/etc/proxysql.cnf
/usr/bin/proxysql
/usr/share/proxysql/tools/proxysql_galera_checker.sh
/usr/share/proxysql/tools/proxysql_galera_writer.pl
# rpm -ql proxysql
/etc/init.d/proxysql
/etc/proxysql.cnf
/usr/bin/proxysql
/usr/share/proxysql/tools/proxysql_galera_checker.sh
/usr/share/proxysql/tools/proxysql_galera_writer.pl
# systemctl status proxysql.service
● proxysql.service - LSB: High Performance Advanced Proxy for MySQL
Loaded: loaded (/etc/rc.d/init.d/proxysql; bad; vendor preset: disabled)
Active: inactive (dead)
Docs: man:systemd-sysv-generator(8)
● proxysql.service - LSB: High Performance Advanced Proxy for MySQL
Loaded: loaded (/etc/rc.d/init.d/proxysql; bad; vendor preset: disabled)
Active: inactive (dead)
Docs: man:systemd-sysv-generator(8)
/etc/init.d/proxysql腳本涉及到如下目錄、文件
OLDDATADIR="/var/run/proxysql"
DATADIR="/var/lib/proxysql"
OPTS="-c /etc/proxysql.cnf -D $DATADIR"
PIDFILE="$DATADIR/proxysql.pid"
OLDDATADIR="/var/run/proxysql"
DATADIR="/var/lib/proxysql"
OPTS="-c /etc/proxysql.cnf -D $DATADIR"
PIDFILE="$DATADIR/proxysql.pid"
# more /run/systemd/generator.late/proxysql.service
# Automatically generated by systemd-sysv-generator
[Unit]
Documentation=man:systemd-sysv-generator(8)
SourcePath=/etc/rc.d/init.d/proxysql
Description=LSB: High Performance Advanced Proxy for MySQL
Before=runlevel2.target
Before=runlevel3.target
Before=runlevel4.target
Before=runlevel5.target
Before=shutdown.target
After=network-online.target
After=vmware-tools.service
After=vmware-tools-thinprint.service
Conflicts=shutdown.target
Documentation=man:systemd-sysv-generator(8)
SourcePath=/etc/rc.d/init.d/proxysql
Description=LSB: High Performance Advanced Proxy for MySQL
Before=runlevel2.target
Before=runlevel3.target
Before=runlevel4.target
Before=runlevel5.target
Before=shutdown.target
After=network-online.target
After=vmware-tools.service
After=vmware-tools-thinprint.service
Conflicts=shutdown.target
[Service]
Type=forking
Restart=no
TimeoutSec=5min
IgnoreSIGPIPE=no
KillMode=process
GuessMainPID=no
RemainAfterExit=yes
ExecStart=/etc/rc.d/init.d/proxysql start
ExecStop=/etc/rc.d/init.d/proxysql stop
ExecReload=/etc/rc.d/init.d/proxysql reload
Type=forking
Restart=no
TimeoutSec=5min
IgnoreSIGPIPE=no
KillMode=process
GuessMainPID=no
RemainAfterExit=yes
ExecStart=/etc/rc.d/init.d/proxysql start
ExecStop=/etc/rc.d/init.d/proxysql stop
ExecReload=/etc/rc.d/init.d/proxysql reload
檢查版本
# which proxysql
/usr/bin/proxysql
# which proxysql
/usr/bin/proxysql
# proxysql --version
ProxySQL version v1.4.10-1-g5eb0f3e, codename Truls
ProxySQL version v1.4.10-1-g5eb0f3e, codename Truls
# proxysql --help
High Performance Advanced Proxy for MySQL
High Performance Advanced Proxy for MySQL
USAGE: proxysql [OPTIONS]
OPTIONS:
-c, --config ARG Configuraton file
-D, --datadir ARG Datadir
-e, --exit-on-error Do not restart ProxySQL if crashes
-f, --foreground Run in foreground
-h, -help, --help, --usage Display usage instructions.
-M, --no-monitor Do not start Monitor Module
-n, --no-start Starts only the admin service
-r, --reuseport Use SO_REUSEPORT
-S, --admin-socket ARG Administration Unix Socket
-V, --version Print version
--idle-threads Create auxiliary threads to handle idle connections
--initial Rename/empty database file
--reload Merge config file into database file
--sqlite3-server Enable SQLite3 Server
-D, --datadir ARG Datadir
-e, --exit-on-error Do not restart ProxySQL if crashes
-f, --foreground Run in foreground
-h, -help, --help, --usage Display usage instructions.
-M, --no-monitor Do not start Monitor Module
-n, --no-start Starts only the admin service
-r, --reuseport Use SO_REUSEPORT
-S, --admin-socket ARG Administration Unix Socket
-V, --version Print version
--idle-threads Create auxiliary threads to handle idle connections
--initial Rename/empty database file
--reload Merge config file into database file
--sqlite3-server Enable SQLite3 Server
ProxySQL rev. v1.4.10-1-g5eb0f3e -- Tue Aug 7 12:31:55 2018
Copyright (C) 2013-2018 ProxySQL LLC
This program is free and without warranty
Copyright (C) 2013-2018 ProxySQL LLC
This program is free and without warranty
配置文件/etc/proxysql.cnf
配置文件只在第一次啟動的時候讀取進行初始化,后面只讀取db文件
先啟動,然后再修改參數
配置文件只在第一次啟動的時候讀取進行初始化,后面只讀取db文件
先啟動,然后再修改參數
啟動proxysql
# systemctl start proxysql
# ps -ef | grep proxysql
root 3157 1 0 01:21 ? 00:00:00 proxysql -c /etc/proxysql.cnf -D /var/lib/proxysql
root 3158 3157 1 01:21 ? 00:00:00 proxysql -c /etc/proxysql.cnf -D /var/lib/proxysql
root 3183 2821 0 01:21 pts/0 00:00:00 grep --color=auto proxysql
# netstat -antp | grep proxysql
tcp 0 0 0.0.0.0:6032 0.0.0.0:* LISTEN 3158/proxysql
tcp 0 0 0.0.0.0:6033 0.0.0.0:* LISTEN 3158/proxysql
# systemctl start proxysql
# ps -ef | grep proxysql
root 3157 1 0 01:21 ? 00:00:00 proxysql -c /etc/proxysql.cnf -D /var/lib/proxysql
root 3158 3157 1 01:21 ? 00:00:00 proxysql -c /etc/proxysql.cnf -D /var/lib/proxysql
root 3183 2821 0 01:21 pts/0 00:00:00 grep --color=auto proxysql
# netstat -antp | grep proxysql
tcp 0 0 0.0.0.0:6032 0.0.0.0:* LISTEN 3158/proxysql
tcp 0 0 0.0.0.0:6033 0.0.0.0:* LISTEN 3158/proxysql
6032是管理端口
6033是對外服務端口
默認管理用戶及密碼:admin,admin
6033是對外服務端口
默認管理用戶及密碼:admin,admin
連接proxysql 6032管理端口
# mysql -uadmin -padmin -h 127.0.0.1 -P6032
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 1
Server version: 5.5.30 (ProxySQL Admin Module)
# mysql -uadmin -padmin -h 127.0.0.1 -P6032
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 1
Server version: 5.5.30 (ProxySQL Admin Module)
Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
(admin@127.0.0.1) [(none)]> show databases;
+-----+---------------+-------------------------------------+
| seq | name | file |
+-----+---------------+-------------------------------------+
| 0 | main | |
| 2 | disk | /var/lib/proxysql/proxysql.db |
| 3 | stats | |
| 4 | monitor | |
| 5 | stats_history | /var/lib/proxysql/proxysql_stats.db |
+-----+---------------+-------------------------------------+
5 rows in set (0.01 sec)
+-----+---------------+-------------------------------------+
| seq | name | file |
+-----+---------------+-------------------------------------+
| 0 | main | |
| 2 | disk | /var/lib/proxysql/proxysql.db |
| 3 | stats | |
| 4 | monitor | |
| 5 | stats_history | /var/lib/proxysql/proxysql_stats.db |
+-----+---------------+-------------------------------------+
5 rows in set (0.01 sec)
main數據庫
內存配置數據庫,即MEMORY,表里存放后端db實例、用戶驗證、路由規則等信息
表名以runtime_開頭的表示ProxySQL當前運行的配置內容,不能通過DML語句修改
只能修改對應的非runtime開頭的表,然后load使其生效,save使其存到硬盤以供下次重啟加載
(admin@127.0.0.1) [(none)]> use main;
Database changed
(admin@127.0.0.1) [main]> show tables;
+--------------------------------------------+
| tables |
+--------------------------------------------+
| global_variables |
| mysql_collations |
| mysql_group_replication_hostgroups |
| mysql_query_rules |
| mysql_query_rules_fast_routing |
| mysql_replication_hostgroups |
| mysql_servers |
| mysql_users |
| proxysql_servers |
| runtime_checksums_values |
| runtime_global_variables |
| runtime_mysql_group_replication_hostgroups |
| runtime_mysql_query_rules |
| runtime_mysql_query_rules_fast_routing |
| runtime_mysql_replication_hostgroups |
| runtime_mysql_servers |
| runtime_mysql_users |
| runtime_proxysql_servers |
| runtime_scheduler |
| scheduler |
+--------------------------------------------+
20 rows in set (0.00 sec)
內存配置數據庫,即MEMORY,表里存放后端db實例、用戶驗證、路由規則等信息
表名以runtime_開頭的表示ProxySQL當前運行的配置內容,不能通過DML語句修改
只能修改對應的非runtime開頭的表,然后load使其生效,save使其存到硬盤以供下次重啟加載
(admin@127.0.0.1) [(none)]> use main;
Database changed
(admin@127.0.0.1) [main]> show tables;
+--------------------------------------------+
| tables |
+--------------------------------------------+
| global_variables |
| mysql_collations |
| mysql_group_replication_hostgroups |
| mysql_query_rules |
| mysql_query_rules_fast_routing |
| mysql_replication_hostgroups |
| mysql_servers |
| mysql_users |
| proxysql_servers |
| runtime_checksums_values |
| runtime_global_variables |
| runtime_mysql_group_replication_hostgroups |
| runtime_mysql_query_rules |
| runtime_mysql_query_rules_fast_routing |
| runtime_mysql_replication_hostgroups |
| runtime_mysql_servers |
| runtime_mysql_users |
| runtime_proxysql_servers |
| runtime_scheduler |
| scheduler |
+--------------------------------------------+
20 rows in set (0.00 sec)
global_variables參數設置,通過set來設置
mysql_query_rules:
指定Query路由到后端不同服務器的規則列表
指定Query路由到后端不同服務器的規則列表
mysql_replication_hostgroups:
監視指定主機組中所有服務器的read_only值,並且根據read_only的值將服務器分配給寫入器或讀取器主機組
監視指定主機組中所有服務器的read_only值,並且根據read_only的值將服務器分配給寫入器或讀取器主機組
mysql_servers:
后端可以連接MySQL服務器的列表
后端可以連接MySQL服務器的列表
mysql_users:
配置后端數據庫的賬號和監控的賬號
配置后端數據庫的賬號和監控的賬號
disk數據庫
持久化到硬盤的配置,sqlite數據文件
持久化到硬盤的配置,sqlite數據文件
stats數據庫
proxysql運行抓取的統計信息,包括到后端各命令的執行次數、流量、processlist、查詢種類匯總、執行時間等等
(admin@127.0.0.1) [stats]> show tables from stats;
+--------------------------------------+
| tables |
+--------------------------------------+
| global_variables |
| stats_memory_metrics |
| stats_mysql_commands_counters |
| stats_mysql_connection_pool |
| stats_mysql_connection_pool_reset |
| stats_mysql_global |
| stats_mysql_prepared_statements_info |
| stats_mysql_processlist |
| stats_mysql_query_digest |
| stats_mysql_query_digest_reset |
| stats_mysql_query_rules |
| stats_mysql_users |
| stats_proxysql_servers_checksums |
| stats_proxysql_servers_metrics |
| stats_proxysql_servers_status |
+--------------------------------------+
15 rows in set (0.00 sec)
proxysql運行抓取的統計信息,包括到后端各命令的執行次數、流量、processlist、查詢種類匯總、執行時間等等
(admin@127.0.0.1) [stats]> show tables from stats;
+--------------------------------------+
| tables |
+--------------------------------------+
| global_variables |
| stats_memory_metrics |
| stats_mysql_commands_counters |
| stats_mysql_connection_pool |
| stats_mysql_connection_pool_reset |
| stats_mysql_global |
| stats_mysql_prepared_statements_info |
| stats_mysql_processlist |
| stats_mysql_query_digest |
| stats_mysql_query_digest_reset |
| stats_mysql_query_rules |
| stats_mysql_users |
| stats_proxysql_servers_checksums |
| stats_proxysql_servers_metrics |
| stats_proxysql_servers_status |
+--------------------------------------+
15 rows in set (0.00 sec)
monitor數據庫
存儲monitor模塊收集的信息,主要是對后端db的健康、延遲檢查
(admin@127.0.0.1) [main]> show tables from monitor;
+------------------------------------+
| tables |
+------------------------------------+
| mysql_server_connect_log |
| mysql_server_group_replication_log |
| mysql_server_ping_log |
| mysql_server_read_only_log |
| mysql_server_replication_lag_log |
+------------------------------------+
5 rows in set (0.00 sec)
存儲monitor模塊收集的信息,主要是對后端db的健康、延遲檢查
(admin@127.0.0.1) [main]> show tables from monitor;
+------------------------------------+
| tables |
+------------------------------------+
| mysql_server_connect_log |
| mysql_server_group_replication_log |
| mysql_server_ping_log |
| mysql_server_read_only_log |
| mysql_server_replication_lag_log |
+------------------------------------+
5 rows in set (0.00 sec)
基本配置
配置有三個層次:runtime<=>memory<=>disk/config file
RUNTIME:代表proxysql當前生效的正在使用的配置,無法直接修改這里的配置,必須要從下一層load進來
MEMORY:MEMORY這一層上面連接RUNTIME層,下面連接持久層。在這層可以正常操作proxysql配置,隨便修改,不會影響生產環境
修改一個配置一般都是先在MEMORY層完成,然后確認正常之后再加載到RUNTIME和持久層到磁盤上
DISK和CONFIG FILE:持久化配置信息,重啟后內存中的配置信息會丟失,所以需要將配置信息保留在磁盤中。重啟時,可以從磁盤快速加載回來
配置有三個層次:runtime<=>memory<=>disk/config file
RUNTIME:代表proxysql當前生效的正在使用的配置,無法直接修改這里的配置,必須要從下一層load進來
MEMORY:MEMORY這一層上面連接RUNTIME層,下面連接持久層。在這層可以正常操作proxysql配置,隨便修改,不會影響生產環境
修改一個配置一般都是先在MEMORY層完成,然后確認正常之后再加載到RUNTIME和持久層到磁盤上
DISK和CONFIG FILE:持久化配置信息,重啟后內存中的配置信息會丟失,所以需要將配置信息保留在磁盤中。重啟時,可以從磁盤快速加載回來
添加mysql服務器列表
就是插入數據到mysql_servers
(admin@127.0.0.1) [main]> insert into mysql_servers(hostgroup_id,hostname,port) values (1,'192.168.1.101',3306);
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> insert into mysql_servers(hostgroup_id,hostname,port) values (1,'192.168.1.102',3306);
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> insert into mysql_servers(hostgroup_id,hostname,port) values (1,'192.168.1.103',3306);
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> load mysql servers to runtime;
Query OK, 0 rows affected (0.00 sec)
(admin@127.0.0.1) [main]> save mysql servers to disk;
Query OK, 0 rows affected (0.03 sec)
(admin@127.0.0.1) [main]> select * from main.mysql_servers;
+--------------+---------------+------+--------+--------+-------------+-----------------+---------------------+---------+----------------+---------+
| hostgroup_id | hostname | port | status | weight | compression | max_connections | max_replication_lag | use_ssl | max_latency_ms | comment |
+--------------+---------------+------+--------+--------+-------------+-----------------+---------------------+---------+----------------+---------+
| 1 | 192.168.1.101 | 3306 | ONLINE | 1 | 0 | 1000 | 0 | 0 | 0 | |
| 1 | 192.168.1.102 | 3306 | ONLINE | 1 | 0 | 1000 | 0 | 0 | 0 | |
| 1 | 192.168.1.103 | 3306 | ONLINE | 1 | 0 | 1000 | 0 | 0 | 0 | |
+--------------+---------------+------+--------+--------+-------------+-----------------+---------------------+---------+----------------+---------+
3 rows in set (0.00 sec)
所有操作都要記得load to runtime和save to disk
在mydb1庫中執行
mysql> create user 'usr_proxysql_mon'@'192.168.1.%' identified by '2wsx3edc';
mysql> grant all privileges on *.* to 'usr_proxysql_mon'@'192.168.1.%';
mysql> flush privileges;
mysql> create user 'usr_proxysql_mon'@'192.168.1.%' identified by '2wsx3edc';
mysql> grant all privileges on *.* to 'usr_proxysql_mon'@'192.168.1.%';
mysql> flush privileges;
添加監控賬號
(admin@127.0.0.1) [main]> set mysql-monitor_username='usr_proxysql_mon';
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> set mysql-monitor_password='2wsx3edc';
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> load mysql variables to runtime;
Query OK, 0 rows affected (0.00 sec)
(admin@127.0.0.1) [main]> save mysql variables to disk;
Query OK, 96 rows affected (0.30 sec)
(admin@127.0.0.1) [main]> select * from main.global_variables where variable_name in ('mysql-monitor_username','mysql-monitor_password');
+------------------------+------------------+
| variable_name | variable_value |
+------------------------+------------------+
| mysql-monitor_password | 2wsx3edc |
| mysql-monitor_username | usr_proxysql_mon |
+------------------------+------------------+
2 rows in set (0.01 sec)
(admin@127.0.0.1) [main]> select * from main.runtime_global_variables where variable_name in ('mysql-monitor_username','mysql-monitor_password');
+------------------------+------------------+
| variable_name | variable_value |
+------------------------+------------------+
| mysql-monitor_password | 2wsx3edc |
| mysql-monitor_username | usr_proxysql_mon |
+------------------------+------------------+
2 rows in set (0.00 sec)
(admin@127.0.0.1) [(none)]> select * from monitor.mysql_server_connect_log;
(admin@127.0.0.1) [main]> set mysql-monitor_username='usr_proxysql_mon';
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> set mysql-monitor_password='2wsx3edc';
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> load mysql variables to runtime;
Query OK, 0 rows affected (0.00 sec)
(admin@127.0.0.1) [main]> save mysql variables to disk;
Query OK, 96 rows affected (0.30 sec)
(admin@127.0.0.1) [main]> select * from main.global_variables where variable_name in ('mysql-monitor_username','mysql-monitor_password');
+------------------------+------------------+
| variable_name | variable_value |
+------------------------+------------------+
| mysql-monitor_password | 2wsx3edc |
| mysql-monitor_username | usr_proxysql_mon |
+------------------------+------------------+
2 rows in set (0.01 sec)
(admin@127.0.0.1) [main]> select * from main.runtime_global_variables where variable_name in ('mysql-monitor_username','mysql-monitor_password');
+------------------------+------------------+
| variable_name | variable_value |
+------------------------+------------------+
| mysql-monitor_password | 2wsx3edc |
| mysql-monitor_username | usr_proxysql_mon |
+------------------------+------------------+
2 rows in set (0.00 sec)
(admin@127.0.0.1) [(none)]> select * from monitor.mysql_server_connect_log;
添加組復制信息
(admin@127.0.0.1) [main]> insert into mysql_group_replication_hostgroups (writer_hostgroup,backup_writer_hostgroup,reader_hostgroup, offline_hostgroup,active,max_writers,writer_is_also_reader,max_transactions_behind) values (1,2,3,4,1,100,0,100);
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> select * from mysql_group_replication_hostgroups;
+------------------+-------------------------+------------------+-------------------+--------+-------------+-----------------------+-------------------------+---------+
| writer_hostgroup | backup_writer_hostgroup | reader_hostgroup | offline_hostgroup | active | max_writers | writer_is_also_reader | max_transactions_behind | comment |
+------------------+-------------------------+------------------+-------------------+--------+-------------+-----------------------+-------------------------+---------+
| 1 | 2 | 3 | 4 | 1 | 100 | 0 | 100 | NULL |
+------------------+-------------------------+------------------+-------------------+--------+-------------+-----------------------+-------------------------+---------+
1 row in set (0.00 sec)
(admin@127.0.0.1) [main]> load mysql servers to runtime;
Query OK, 0 rows affected (0.01 sec)
(admin@127.0.0.1) [main]> save mysql servers to disk;
Query OK, 0 rows affected (0.04 sec)
(admin@127.0.0.1) [main]> select * from runtime_mysql_servers;
+--------------+---------------+------+--------+--------+-------------+-----------------+---------------------+---------+----------------+---------+
| hostgroup_id | hostname | port | status | weight | compression | max_connections | max_replication_lag | use_ssl | max_latency_ms | comment |
+--------------+---------------+------+--------+--------+-------------+-----------------+---------------------+---------+----------------+---------+
| 1 | 192.168.1.101 | 3306 | ONLINE | 1 | 0 | 1000 | 0 | 0 | 0 | |
| 3 | 192.168.1.102 | 3306 | ONLINE | 1 | 0 | 1000 | 0 | 0 | 0 | |
| 3 | 192.168.1.103 | 3306 | ONLINE | 1 | 0 | 1000 | 0 | 0 | 0 | |
+--------------+---------------+------+--------+--------+-------------+-----------------+---------------------+---------+----------------+---------+
3 rows in set (0.01 sec)
(admin@127.0.0.1) [main]> insert into mysql_group_replication_hostgroups (writer_hostgroup,backup_writer_hostgroup,reader_hostgroup, offline_hostgroup,active,max_writers,writer_is_also_reader,max_transactions_behind) values (1,2,3,4,1,100,0,100);
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> select * from mysql_group_replication_hostgroups;
+------------------+-------------------------+------------------+-------------------+--------+-------------+-----------------------+-------------------------+---------+
| writer_hostgroup | backup_writer_hostgroup | reader_hostgroup | offline_hostgroup | active | max_writers | writer_is_also_reader | max_transactions_behind | comment |
+------------------+-------------------------+------------------+-------------------+--------+-------------+-----------------------+-------------------------+---------+
| 1 | 2 | 3 | 4 | 1 | 100 | 0 | 100 | NULL |
+------------------+-------------------------+------------------+-------------------+--------+-------------+-----------------------+-------------------------+---------+
1 row in set (0.00 sec)
(admin@127.0.0.1) [main]> load mysql servers to runtime;
Query OK, 0 rows affected (0.01 sec)
(admin@127.0.0.1) [main]> save mysql servers to disk;
Query OK, 0 rows affected (0.04 sec)
(admin@127.0.0.1) [main]> select * from runtime_mysql_servers;
+--------------+---------------+------+--------+--------+-------------+-----------------+---------------------+---------+----------------+---------+
| hostgroup_id | hostname | port | status | weight | compression | max_connections | max_replication_lag | use_ssl | max_latency_ms | comment |
+--------------+---------------+------+--------+--------+-------------+-----------------+---------------------+---------+----------------+---------+
| 1 | 192.168.1.101 | 3306 | ONLINE | 1 | 0 | 1000 | 0 | 0 | 0 | |
| 3 | 192.168.1.102 | 3306 | ONLINE | 1 | 0 | 1000 | 0 | 0 | 0 | |
| 3 | 192.168.1.103 | 3306 | ONLINE | 1 | 0 | 1000 | 0 | 0 | 0 | |
+--------------+---------------+------+--------+--------+-------------+-----------------+---------------------+---------+----------------+---------+
3 rows in set (0.01 sec)
創建檢查MGR節點狀態的函數和視圖,該視圖將為ProxySQL提供組復制相關的監控狀態指標
在mydb1庫中添加如下的視圖,及其依賴的存儲函數
下載https://github.com/lefred/mysql_gr_routing_check/blob/master/addition_to_sys.sql
[root@mydb1 ~]# cat addition_to_sys.sql
USE sys;
在mydb1庫中添加如下的視圖,及其依賴的存儲函數
下載https://github.com/lefred/mysql_gr_routing_check/blob/master/addition_to_sys.sql
[root@mydb1 ~]# cat addition_to_sys.sql
USE sys;
DELIMITER $$
CREATE FUNCTION IFZERO(a INT, b INT)
RETURNS INT
DETERMINISTIC
RETURN IF(a = 0, b, a)$$
CREATE FUNCTION IFZERO(a INT, b INT)
RETURNS INT
DETERMINISTIC
RETURN IF(a = 0, b, a)$$
CREATE FUNCTION LOCATE2(needle TEXT(10000), haystack TEXT(10000), offset INT)
RETURNS INT
DETERMINISTIC
RETURN IFZERO(LOCATE(needle, haystack, offset), LENGTH(haystack) + 1)$$
RETURNS INT
DETERMINISTIC
RETURN IFZERO(LOCATE(needle, haystack, offset), LENGTH(haystack) + 1)$$
CREATE FUNCTION GTID_NORMALIZE(g TEXT(10000))
RETURNS TEXT(10000)
DETERMINISTIC
RETURN GTID_SUBTRACT(g, '')$$
RETURNS TEXT(10000)
DETERMINISTIC
RETURN GTID_SUBTRACT(g, '')$$
CREATE FUNCTION GTID_COUNT(gtid_set TEXT(10000))
RETURNS INT
DETERMINISTIC
BEGIN
DECLARE result BIGINT DEFAULT 0;
DECLARE colon_pos INT;
DECLARE next_dash_pos INT;
DECLARE next_colon_pos INT;
DECLARE next_comma_pos INT;
SET gtid_set = GTID_NORMALIZE(gtid_set);
SET colon_pos = LOCATE2(':', gtid_set, 1);
WHILE colon_pos != LENGTH(gtid_set) + 1 DO
SET next_dash_pos = LOCATE2('-', gtid_set, colon_pos + 1);
SET next_colon_pos = LOCATE2(':', gtid_set, colon_pos + 1);
SET next_comma_pos = LOCATE2(',', gtid_set, colon_pos + 1);
IF next_dash_pos < next_colon_pos AND next_dash_pos < next_comma_pos THEN
SET result = result +
SUBSTR(gtid_set, next_dash_pos + 1,
LEAST(next_colon_pos, next_comma_pos) - (next_dash_pos + 1)) -
SUBSTR(gtid_set, colon_pos + 1, next_dash_pos - (colon_pos + 1)) + 1;
ELSE
SET result = result + 1;
END IF;
SET colon_pos = next_colon_pos;
END WHILE;
RETURN result;
END$$
RETURNS INT
DETERMINISTIC
BEGIN
DECLARE result BIGINT DEFAULT 0;
DECLARE colon_pos INT;
DECLARE next_dash_pos INT;
DECLARE next_colon_pos INT;
DECLARE next_comma_pos INT;
SET gtid_set = GTID_NORMALIZE(gtid_set);
SET colon_pos = LOCATE2(':', gtid_set, 1);
WHILE colon_pos != LENGTH(gtid_set) + 1 DO
SET next_dash_pos = LOCATE2('-', gtid_set, colon_pos + 1);
SET next_colon_pos = LOCATE2(':', gtid_set, colon_pos + 1);
SET next_comma_pos = LOCATE2(',', gtid_set, colon_pos + 1);
IF next_dash_pos < next_colon_pos AND next_dash_pos < next_comma_pos THEN
SET result = result +
SUBSTR(gtid_set, next_dash_pos + 1,
LEAST(next_colon_pos, next_comma_pos) - (next_dash_pos + 1)) -
SUBSTR(gtid_set, colon_pos + 1, next_dash_pos - (colon_pos + 1)) + 1;
ELSE
SET result = result + 1;
END IF;
SET colon_pos = next_colon_pos;
END WHILE;
RETURN result;
END$$
CREATE FUNCTION gr_applier_queue_length()
RETURNS INT
DETERMINISTIC
BEGIN
RETURN (SELECT sys.gtid_count( GTID_SUBTRACT( (SELECT Received_transaction_set FROM performance_schema.replication_connection_status WHERE Channel_name = 'group_replication_applier' ), (SELECT @@global.GTID_EXECUTED) )));
END$$
RETURNS INT
DETERMINISTIC
BEGIN
RETURN (SELECT sys.gtid_count( GTID_SUBTRACT( (SELECT Received_transaction_set FROM performance_schema.replication_connection_status WHERE Channel_name = 'group_replication_applier' ), (SELECT @@global.GTID_EXECUTED) )));
END$$
CREATE FUNCTION gr_member_in_primary_partition()
RETURNS VARCHAR(3)
DETERMINISTIC
BEGIN
RETURN (SELECT IF( MEMBER_STATE='ONLINE' AND ((SELECT COUNT(*) FROM performance_schema.replication_group_members WHERE MEMBER_STATE != 'ONLINE') >=((SELECT COUNT(*) FROM performance_schema.replication_group_members)/2) = 0),'YES', 'NO' ) FROM performance_schema.replication_group_members JOIN performance_schema.replication_group_member_stats USING(member_id));
END$$
RETURNS VARCHAR(3)
DETERMINISTIC
BEGIN
RETURN (SELECT IF( MEMBER_STATE='ONLINE' AND ((SELECT COUNT(*) FROM performance_schema.replication_group_members WHERE MEMBER_STATE != 'ONLINE') >=((SELECT COUNT(*) FROM performance_schema.replication_group_members)/2) = 0),'YES', 'NO' ) FROM performance_schema.replication_group_members JOIN performance_schema.replication_group_member_stats USING(member_id));
END$$
CREATE VIEW gr_member_routing_candidate_status
AS
SELECT sys.gr_member_in_primary_partition() as viable_candidate,
IF((SELECT (SELECT GROUP_CONCAT(variable_value) FROM performance_schema.global_variables WHERE variable_name IN ('read_only','super_read_only')) != 'OFF,OFF'), 'YES', 'NO') as read_only,
sys.gr_applier_queue_length() as transactions_behind,
Count_Transactions_in_queue as 'transactions_to_cert'
from performance_schema.replication_group_member_stats;$$
DELIMITER ;
AS
SELECT sys.gr_member_in_primary_partition() as viable_candidate,
IF((SELECT (SELECT GROUP_CONCAT(variable_value) FROM performance_schema.global_variables WHERE variable_name IN ('read_only','super_read_only')) != 'OFF,OFF'), 'YES', 'NO') as read_only,
sys.gr_applier_queue_length() as transactions_behind,
Count_Transactions_in_queue as 'transactions_to_cert'
from performance_schema.replication_group_member_stats;$$
DELIMITER ;
mysql> source /root/addition_to_sys.sql
在mydb1庫中執行
mysql> create user 'root'@'192.168.1.%' identified by '1qaz2wsx';
mysql> grant all privileges on *.* to 'root'@'192.168.1.%';
mysql> create user 'msandbox'@'192.168.1.%' identified by '1qaz2wsx';
mysql> grant all privileges on *.* to 'msandbox'@'192.168.1.%';
mysql> flush privileges;
mysql> create user 'root'@'192.168.1.%' identified by '1qaz2wsx';
mysql> grant all privileges on *.* to 'root'@'192.168.1.%';
mysql> create user 'msandbox'@'192.168.1.%' identified by '1qaz2wsx';
mysql> grant all privileges on *.* to 'msandbox'@'192.168.1.%';
mysql> flush privileges;
配置對外訪問賬號
(admin@127.0.0.1) [main]> select * from mysql_users;
Empty set (0.00 sec)
(admin@127.0.0.1) [main]> insert into main.mysql_users(username,password,default_hostgroup,transaction_persistent) values ('root','1qaz2wsx',1,1);
Query OK, 1 row affected (0.01 sec)
(admin@127.0.0.1) [main]> insert into main.mysql_users(username,password,default_hostgroup,transaction_persistent) values ('msandbox','1qaz2wsx',1,1);
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> select * from mysql_users;
+----------+----------+--------+---------+-------------------+----------------+---------------+------------------------+--------------+---------+----------+-----------------+
| username | password | active | use_ssl | default_hostgroup | default_schema | schema_locked | transaction_persistent | fast_forward | backend | frontend | max_connections |
+----------+----------+--------+---------+-------------------+----------------+---------------+------------------------+--------------+---------+----------+-----------------+
| root | 1qaz2wsx | 1 | 0 | 1 | NULL | 0 | 1 | 0 | 1 | 1 | 10000 |
| msandbox | 1qaz2wsx | 1 | 0 | 1 | NULL | 0 | 1 | 0 | 1 | 1 | 10000 |
+----------+----------+--------+---------+-------------------+----------------+---------------+------------------------+--------------+---------+----------+-----------------+
2 rows in set (0.00 sec)
(admin@127.0.0.1) [main]> load mysql users to runtime;
Query OK, 0 rows affected (0.00 sec)
(admin@127.0.0.1) [main]> save mysql users to disk;
Query OK, 0 rows affected (0.00 sec)
(admin@127.0.0.1) [main]> select * from mysql_users;
Empty set (0.00 sec)
(admin@127.0.0.1) [main]> insert into main.mysql_users(username,password,default_hostgroup,transaction_persistent) values ('root','1qaz2wsx',1,1);
Query OK, 1 row affected (0.01 sec)
(admin@127.0.0.1) [main]> insert into main.mysql_users(username,password,default_hostgroup,transaction_persistent) values ('msandbox','1qaz2wsx',1,1);
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> select * from mysql_users;
+----------+----------+--------+---------+-------------------+----------------+---------------+------------------------+--------------+---------+----------+-----------------+
| username | password | active | use_ssl | default_hostgroup | default_schema | schema_locked | transaction_persistent | fast_forward | backend | frontend | max_connections |
+----------+----------+--------+---------+-------------------+----------------+---------------+------------------------+--------------+---------+----------+-----------------+
| root | 1qaz2wsx | 1 | 0 | 1 | NULL | 0 | 1 | 0 | 1 | 1 | 10000 |
| msandbox | 1qaz2wsx | 1 | 0 | 1 | NULL | 0 | 1 | 0 | 1 | 1 | 10000 |
+----------+----------+--------+---------+-------------------+----------------+---------------+------------------------+--------------+---------+----------+-----------------+
2 rows in set (0.00 sec)
(admin@127.0.0.1) [main]> load mysql users to runtime;
Query OK, 0 rows affected (0.00 sec)
(admin@127.0.0.1) [main]> save mysql users to disk;
Query OK, 0 rows affected (0.00 sec)
# mysql -u root -p1qaz2wsx -h 127.0.0.1 -P6033
# mysql -u msandbox -p1qaz2wsx -h 127.0.0.1 -P6033
# mysql -u msandbox -p1qaz2wsx -h 127.0.0.1 -P6033
讀寫分離
配置讀寫分離策略需要使用mysql_query_rules表。表中的match_pattern字段就是代表設置的規則,destination_hostgroup字段代表默認指定的分組,apply代表真正執行應用規則
所有以select開頭的語句全部分配到編號為3的讀組中
select for update會產生一個寫鎖,對數據查詢的時效性要求高,把它分配到編號為1的寫組里,其他所有操作都會默認路由到寫組中
(admin@127.0.0.1) [main]> select * from mysql_query_rules;
Empty set (0.00 sec)
(admin@127.0.0.1) [main]> insert into main.mysql_query_rules(active,match_pattern,destination_hostgroup, apply) VALUES(1,'^SELECT.*FOR UPDATE$',1,1);
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> insert into main.mysql_query_rules(active,match_pattern,destination_hostgroup, apply) VALUES(1,'^SELECT',3,1);
Query OK, 1 row affected (0.00 sec)
(admin@127.0.0.1) [main]> load mysql query rules to runtime;
Query OK, 0 rows affected (0.00 sec)
(admin@127.0.0.1) [main]> save mysql query rules to disk;
Query OK, 0 rows affected (0.05 sec)
(admin@127.0.0.1) [(none)]> select * from mysql_server_group_replication_log order by time_start_us desc limit 5;
+---------------+------+------------------+-----------------+------------------+-----------+---------------------+-------+
| hostname | port | time_start_us | success_time_us | viable_candidate | read_only | transactions_behind | error |
+---------------+------+------------------+-----------------+------------------+-----------+---------------------+-------+
| 192.168.1.103 | 3306 | 1539870708023091 | 4295 | YES | YES | 0 | NULL |
| 192.168.1.102 | 3306 | 1539870708022779 | 3293 | YES | YES | 0 | NULL |
| 192.168.1.101 | 3306 | 1539870708019315 | 3643 | YES | NO | 0 | NULL |
| 192.168.1.103 | 3306 | 1539870703023065 | 6330 | YES | YES | 0 | NULL |
| 192.168.1.102 | 3306 | 1539870703022702 | 4926 | YES | YES | 0 | NULL |
+---------------+------+------------------+-----------------+------------------+-----------+---------------------+-------+
5 rows in set (0.00 sec)
添加Keepalive功能