linux下安裝redis 3.2.1
#tar zxvf redis-3.2.1.tar.gz
#cd redis-3.2.1
#make MALLOC=libc
#cd redis-3.2.1/src
#make test
#make install
安裝完成后,會有6個redis-*文件從./redis-3.2.1/src/redis-*自動被拷貝到/usr/local/bin/下面
#cp ./redis-3.2.1/redis.conf /etc/redis.conf #拷貝配置文件到/etc並修改配置
bind 127.0.0.1 10.132.6.118
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile "/var/run/redis_6379.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /app/redis-3.2.1/data
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
maxclients 50000
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
maxmemory-policy volatile-ttl
#cluster-enabled yes
#cluster-config-file "/opt/nodes.conf"
#cluster-node-timeout 5000
啟動redis: #redis-server /etc/redis.conf &
[
root@iZ28b5ymck0Z src]# redis-server /etc/redis.conf &
[1] 28671
[
root@iZ28b5ymck0Z src]# ps -ef |grep redis
root 28671 22836 0 10:47 pts/0 00:00:00 redis-server 0.0.0.0:6379
root 28675 22836 0 10:47 pts/0 00:00:00 grep redis
停止redis: redis-cli shutdown
[
root@iZ28b5ymck0Z bin]# redis-cli shutdown
28671:M 24 Jul 10:56:23.219 # User requested shutdown...
28671:M 24 Jul 10:56:23.219 * Saving the final RDB snapshot before exiting.
28671:M 24 Jul 10:56:23.226 * DB saved on disk
28671:M 24 Jul 10:56:23.226 * Removing the pid file.
28671:M 24 Jul 10:56:23.226 # Redis is now ready to exit, bye bye...
[1]+ Done redis-server /etc/redis.conf (wd: ~/redis-3.2.1/src)
(wd now: /usr/local/bin)
[
root@iZ28b5ymck0Z bin]# ps -ef |grep redis
root 28687 22836 0 10:56 pts/0 00:00:00 grep redis
配置redis為服務並自動啟動
做一個redis啟動腳本
#cd /etc/rc.d/init.d/
#vim redis
#!/bin/sh
#chkconfig: 345 86 14
#description: Startup and shutdown script for Redis

# Installation directory of the redis binaries.
PROGDIR=/usr/local/bin
PROGNAME=redis-server
DAEMON=$PROGDIR/$PROGNAME
CONFIG=/etc/redis.conf
# Must be the exact pidfile configured in $CONFIG ("pidfile /var/run/redis_6379.pid").
# A glob such as /var/run/redis_*.pid would expand to multiple words and break
# `cat` in stop() as soon as a second instance is running.
PIDFILE=/var/run/redis_6379.pid
DESC="redis daemon"
SCRIPTNAME=/etc/rc.d/init.d/redis

# Start the server if the binary is executable; report OK/failed.
start()
{
    if [ -x "$DAEMON" ]; then
        echo "Starting $DESC: $PROGNAME"
        if "$DAEMON" "$CONFIG"; then
            echo "OK"
        else
            echo "failed"
        fi
    else
        echo "Couldn't find Redis Server ($DAEMON)"
    fi
}

# Stop the server via the pid recorded in $PIDFILE.
stop()
{
    if [ -e "$PIDFILE" ]; then
        echo "Stopping $DESC: $PROGNAME"
        if kill "$(cat "$PIDFILE")"; then
            echo "OK"
        else
            echo "failed"
        fi
    else
        echo "No Redis Server ($DAEMON) running"
    fi
}

restart()
{
    echo "Restarting $DESC: $PROGNAME"
    stop
    # Give the old process a moment to save data and release the
    # port/pidfile before starting the new one.
    sleep 1
    start
}

# Show running redis-server processes. The [r] bracket trick prevents
# the grep command itself from appearing in its own output.
list()
{
    ps aux | grep "[r]edis-server"
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        restart
        ;;
    list)
        list
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|restart|list}" >&2
        exit 1
        ;;
esac
exit 0
#chmod 755 redis
#chkconfig --add redis
#chkconfig redis on
#chkconfig --list redis
檢驗配置:
#service redis start &
#service redis list
[
root@iZ28b5ymck0Z bin]# redis-cli
127.0.0.1:6379>
#service redis stop
#service redis restart
Redis常用命令總結
[
root@iZ28elppcznZ logs]# redis-cli -h 127.0.0.1
redis 127.0.0.1:6379>
redis 127.0.0.1:6379>
redis 127.0.0.1:6379> ping
PONG
redis 127.0.0.1:6379> select 1
OK
redis 127.0.0.1:6379[1]> dbsize
(integer) 0
redis 127.0.0.1:6379[1]> select 0
OK
redis 127.0.0.1:6379> dbsize
(integer) 0
redis 127.0.0.1:6379> info
部署redis3.x-cluster
Cluster node
172.172.178.220:6379
172.172.178.221:6379
172.172.178.222:6379
172.172.178.223:6379
172.172.178.224:6379
172.172.178.225:6379
啟動redis各節點實例
redis-server /opt/redis.conf
[root@node1 ~]# cat /var/run/redis_6379.log
_._
_.-“__ ''-._
_.-“ `. `_. ''-._ Redis 3.2.1 (00000000/0) 64 bit
.-“ .-“`. “`\/ _.,_ ''-._
( ' , .-` | `, ) Running in standalone mode
|`-._`-…-` __…-.“-._|'` _.-'| Port: 6379
| `-._ `._ / _.-' | PID: 27555
`-._ `-._ `-./ _.-' _.-'
|`-._`-._ `-.__.-' _.-'_.-'|
| `-._`-._ _.-'_.-' | http://redis.io
`-._ `-._`-.__.-'_.-' _.-'
|`-._`-._ `-.__.-' _.-'_.-'|
| `-._`-._ _.-'_.-' |
`-._ `-._`-.__.-'_.-' _.-'
`-._ `-.__.-' _.-'
`-._ _.-'
`-.__.-'
27555:M 29 Jun 19:43:03.276 # Server started, Redis version 3.2.1
27555:M 29 Jun 19:43:03.276 * The server is now ready to accept connections on port 6379
安裝依賴lib
yum install ruby rubygems -y
安裝gem-redis
wget https://rubygems.global.ssl.fastly.net/gems/redis-3.2.1.gem
gem install -l redis-3.2.1.gem
復制cluster管理程序到/usr/local/bin
scp redis-3.2.1/src/redis-trib.rb /usr/local/bin/redis-trib
創建cluster
--replicas 1 表示為集群中的每個主節點創建一個從節點
[root@node1 ~]# redis-trib create --replicas 1 172.172.178.220:6379 172.172.178.221:6379 172.172.178.222:6379 172.172.178.223:6379 172.172.178.224:6379 172.172.178.225:6379
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes…
Using 3 masters:
172.172.178.225:6379
172.172.178.224:6379
172.172.178.223:6379
Adding replica 172.172.178.222:6379 to 172.172.178.225:6379
Adding replica 172.172.178.221:6379 to 172.172.178.224:6379
Adding replica 172.172.178.220:6379 to 172.172.178.223:6379
S: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
replicates cd77344524a6ed5a4d1addfd34d7dd720c272386
S: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
replicates e3e1f4f9254d6e26bf381b9d83456a62fa555f62
S: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
replicates fc4d4c03b2ab454f230f0af3b84930b26b6a56ac
M: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots:10923-16383 (5461 slots) master
M: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots:5461-10922 (5462 slots) master
M: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots:0-5460 (5461 slots) master
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join….
>>> Performing Cluster Check (using node 172.172.178.220:6379)
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots: (0 slots) master
replicates cd77344524a6ed5a4d1addfd34d7dd720c272386
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots: (0 slots) master
replicates e3e1f4f9254d6e26bf381b9d83456a62fa555f62
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots: (0 slots) master
replicates fc4d4c03b2ab454f230f0af3b84930b26b6a56ac
M: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots:10923-16383 (5461 slots) master
M: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots:5461-10922 (5462 slots) master
M: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots:0-5460 (5461 slots) master
[OK] All nodes agree about slots configuration.
>>> Check for open slots…
>>> Check slots coverage…
[OK] All 16384 slots covered.
[root@node1 ~]# redis-cli cluster nodes
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master – 0 1467208947127 5 connected 5461-10922
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 slave fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 0 1467208947127 6 connected
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,slave cd77344524a6ed5a4d1addfd34d7dd720c272386 0 0 1 connected
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master – 0 1467208948629 6 connected 0-5460
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 slave e3e1f4f9254d6e26bf381b9d83456a62fa555f62 0 1467208949131 5 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master – 0 1467208948128 4 connected 10923-16383
[root@node1 ~]#
[root@node1 ~]# tailf /opt/nodes.conf
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master – 0 1467208714060 5 connected 5461-10922
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 slave fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 0 1467208716066 6 connected
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,slave cd77344524a6ed5a4d1addfd34d7dd720c272386 0 0 1 connected
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master – 0 1467208715063 6 connected 0-5460
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 slave e3e1f4f9254d6e26bf381b9d83456a62fa555f62 0 1467208714562 5 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master – 0 1467208715564 4 connected 10923-16383
vars currentEpoch 6 lastVoteEpoch 0
測試cluster
[root@node1 ~]# redis-cli -c -p 6379 -h localhost
127.0.0.1:6379> get name
-> Redirected to slot [5798] located at 172.172.178.224:6379
(nil)
172.172.178.224:6379> set name cluster-test
OK
172.172.178.224:6379> get name
"cluster-test"
172.172.178.224:6379> KEYS *
1) "name"
kill 223上的master測試故障轉移,可以看到220切換成了master角色
[root@node4 ~]# redis-cli shutdown
[root@node1 ~]# redis-cli cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 slave e3e1f4f9254d6e26bf381b9d83456a62fa555f62 0 1467209895832 5 connected
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master – 0 1467209896334 5 connected 5461-10922
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 slave fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 0 1467209894829 6 connected
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master – 0 0 7 connected 10923-16383
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master – 0 1467209895832 6 connected 0-5460
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master,fail – 1467209864421 1467209863721 4 disconnected
kill 224上的master測試故障轉移,可以看到221切換成了master角色
[root@node5 ~]# ps -ef|grep redis
root 3098 1 0 21:57 ? 00:00:02 redis-server 0.0.0.0:6379 [cluster]
root 4649 20237 0 22:20 pts/0 00:00:00 grep redis
[root@node5 ~]#
[root@node5 ~]# kill -9 3098
[root@node1 ~]# redis-cli -c cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master – 0 1467210097070 8 connected 5461-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master,fail – 1467210059529 1467210058425 5 disconnected
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 slave fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 0 1467210097573 6 connected
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master – 0 0 7 connected 10923-16383
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master – 0 1467210098576 6 connected 0-5460
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master,fail – 1467209864421 1467209863721 4 disconnected
kill 225上的master測試故障轉移,可以看到222切換成了master角色
[root@node6 ~]# redis-cli shutdown
[root@node1 ~]# redis-cli -c cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master – 0 1467210296440 8 connected 5461-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 master,fail – 1467210059529 1467210058425 5 disconnected
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 master – 0 1467210297444 10 connected 0-5460
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master – 0 0 7 connected 10923-16383
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 master,fail - 1467210229709 1467210229107 6 disconnected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 master,fail – 1467209864421 1467209863721 4 disconnected
如果將沒有slave的master節點kill掉,集群將不可用
[root@node1 ~]# redis-cli -c
127.0.0.1:6379> get name
(error) CLUSTERDOWN The cluster is down
Redis Cluster 添加刪除節點
添加master節點
172.172.178.221:6380是新增的主節點,172.172.178.222:6379是cluster中任意節點
[root@node1 ~]# redis-trib add-node 172.172.178.221:6380 172.172.178.222:6379
>>> Adding node 172.172.178.221:6380 to cluster 172.172.178.222:6379
>>> Performing Cluster Check (using node 172.172.178.222:6379)
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots…
>>> Check slots coverage…
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 172.172.178.221:6380 to make it join the cluster.
[OK] New node added correctly.
添加slave節點
172.172.178.221:6381是新增的從節點,172.172.178.222:6379是cluster中任意節點,--master-id是master節點的ID,--slave代表添加slave節點
[root@node1 ~]# redis-trib add-node --slave --master-id f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6381 172.172.178.222:6379
>>> Adding node 172.172.178.221:6381 to cluster 172.172.178.222:6379
>>> Performing Cluster Check (using node 172.172.178.222:6379)
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots: (0 slots) master
0 additional replica(s)
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots…
>>> Check slots coverage…
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 172.172.178.221:6381 to make it join the cluster.
Waiting for the cluster to join.
>>> Configure node as replica of 172.172.178.221:6380.
[OK] New node added correctly.
檢查cluster信息
[root@node1 ~]# redis-cli cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master – 0 1467216584316 8 connected 5461-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 slave 9b3b5256f0107585d5379de7e76f526d071e8a11 0 1467216583314 8 connected
f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380 master - 0 1467216583314 0 connected
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 master – 0 1467216584316 10 connected 0-5460
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master – 0 0 7 connected 10923-16383
2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381 slave f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 0 1467216584816 0 connected
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 slave e61cffa1d3040dd5523e5c79912b23732618b464 0 1467216585318 10 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 slave ff52c80b8477f50272a86893aec89dac153e9fbe 0 1467216585318 7 connected
[root@node1 ~]#
[root@node1 ~]# redis-trib check 172.172.178.220:6379
>>> Performing Cluster Check (using node 172.172.178.220:6379)
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots: (0 slots) master
1 additional replica(s)
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381
slots: (0 slots) slave
replicates f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
[OK] All nodes agree about slots configuration.
>>> Check for open slots…
>>> Check slots coverage…
[OK] All 16384 slots covered.
新添加的master節點沒有slots,需要重新分配slot,否則存儲數據無法被選中
[root@node1 ~]# redis-trib reshard 172.172.178.221:6380
>>> Performing Cluster Check (using node 172.172.178.221:6380)
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots: (0 slots) master
1 additional replica(s)
S: 2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381
slots: (0 slots) slave
replicates f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots…
>>> Check slots coverage…
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 1500
What is the receiving node ID? f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:all
Ready to move 1500 slots.
Source nodes:
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
Destination node:
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots: (0 slots) master
1 additional replica(s)
Resharding plan:
Moving slot 5461 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5462 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5463 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5464 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5465 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5466 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5467 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5468 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5469 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5470 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5471 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5472 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5473 from 9b3b5256f0107585d5379de7e76f526d071e8a11
Moving slot 5474 from 9b3b5256f0107585d5379de7e76f526d071e8a11
…………………..
再次檢查cluster信息
[root@node1 ~]# redis-cli -c cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master – 0 1467217411735 8 connected 5962-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 slave 9b3b5256f0107585d5379de7e76f526d071e8a11 0 1467217411735 8 connected
f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380 master – 0 1467217411234 12 connected 0-498 5461-5961 10923-11421
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 master – 0 1467217412739 10 connected 499-5460
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master – 0 0 7 connected 11422-16383
2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381 slave f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 0 1467217412237 12 connected
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 slave e61cffa1d3040dd5523e5c79912b23732618b464 0 1467217410732 10 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 slave ff52c80b8477f50272a86893aec89dac153e9fbe 0 1467217410732 7 connected
[root@node1 ~]#
[root@node1 ~]#
[root@node1 ~]# redis-trib check 172.172.178.221:6380
>>> Performing Cluster Check (using node 172.172.178.221:6380)
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots:0-498,5461-5961,10923-11421 (1499 slots) master
1 additional replica(s)
S: 2cc2b30e09fde453fbd42a7330bbd65d216dc6cc 172.172.178.221:6381
slots: (0 slots) slave
replicates f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5962-10922 (4961 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:11422-16383 (4962 slots) master
1 additional replica(s)
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:499-5460 (4962 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots…
>>> Check slots coverage…
[OK] All 16384 slots covered.
刪除slave節點
[root@node1 ~]# redis-trib del-node 172.172.178.221:6381 '2cc2b30e09fde453fbd42a7330bbd65d216dc6cc'
>>> Removing node 2cc2b30e09fde453fbd42a7330bbd65d216dc6cc from cluster 172.172.178.221:6381
>>> Sending CLUSTER FORGET messages to the cluster…
>>> SHUTDOWN the node.
刪除master節點
如果master節點有slave節點將slave節點轉移到其他master節點,如果master節點有slot,去掉分配的slot然后刪除master節點
[root@node1 ~]# redis-trib reshard 172.172.178.221:6380
>>> Performing Cluster Check (using node 172.172.178.221:6380)
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots:0-498,5461-5961,10923-11421 (1499 slots) master
0 additional replica(s)
S: cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379
slots: (0 slots) slave
replicates ff52c80b8477f50272a86893aec89dac153e9fbe
M: 9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379
slots:5962-10922 (4961 slots) master
1 additional replica(s)
S: fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379
slots: (0 slots) slave
replicates e61cffa1d3040dd5523e5c79912b23732618b464
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:11422-16383 (4962 slots) master
1 additional replica(s)
S: e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379
slots: (0 slots) slave
replicates 9b3b5256f0107585d5379de7e76f526d071e8a11
M: e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379
slots:499-5460 (4962 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots…
>>> Check slots coverage…
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 1500
What is the receiving node ID? ff52c80b8477f50272a86893aec89dac153e9fbe
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Source node #2:done
Ready to move 1500 slots.
Source nodes:
M: f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 172.172.178.221:6380
slots:0-498,5461-5961,10923-11421 (1499 slots) master
0 additional replica(s)
Destination node:
M: ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379
slots:11422-16383 (4962 slots) master
1 additional replica(s)
Resharding plan:
Moving slot 0 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 1 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 2 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 3 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 4 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 5 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 6 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 7 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 8 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
Moving slot 9 from f8ccaffd5378d7380a0f8f57b9c8b6c825688a85
……
[root@node1 ~]# redis-trib del-node 172.172.178.221:6380 'f8ccaffd5378d7380a0f8f57b9c8b6c825688a85'
>>> Removing node f8ccaffd5378d7380a0f8f57b9c8b6c825688a85 from cluster 172.172.178.221:6380
>>> Sending CLUSTER FORGET messages to the cluster…
>>> SHUTDOWN the node.
[root@node1 ~]#
[root@node1 ~]# redis-cli -c cluster nodes
9b3b5256f0107585d5379de7e76f526d071e8a11 172.172.178.221:6379 master – 0 1467218665980 8 connected 5962-10922
e3e1f4f9254d6e26bf381b9d83456a62fa555f62 172.172.178.224:6379 slave 9b3b5256f0107585d5379de7e76f526d071e8a11 0 1467218665478 8 connected
e61cffa1d3040dd5523e5c79912b23732618b464 172.172.178.222:6379 master – 0 1467218666480 10 connected 499-5460
ff52c80b8477f50272a86893aec89dac153e9fbe 172.172.178.220:6379 myself,master – 0 0 13 connected 0-498 5461-5961 10923-16383
fc4d4c03b2ab454f230f0af3b84930b26b6a56ac 172.172.178.225:6379 slave e61cffa1d3040dd5523e5c79912b23732618b464 0 1467218667484 10 connected
cd77344524a6ed5a4d1addfd34d7dd720c272386 172.172.178.223:6379 slave ff52c80b8477f50272a86893aec89dac153e9fbe 0 1467218666982 13 connected
Redis cluster工作原理
Redis 集群的 TCP 端口(Redis Cluster TCP ports)
每個 Redis 集群節點需要兩個 TCP 連接打開。正常的 TCP 端口用來服務客戶端,例如 6379,加 10000 的端口用作數據端口,在上面的例子中就是 16379。 第二個大一些的端口用於集群總線(bus),也就是使用二進制協議的點到點通信通道。集群總線被節點用 於錯誤檢測,配置更新,故障轉移授權等等。客戶端不應該嘗試連接集群總線端口,而應一直與正常的 Redis 命令端口通信,但是要確保在防火牆中打開了這兩個端口,否則 Redis 集群的節點不能相互通信。 命令端口和集群總線端口的偏移量一直固定為 10000。 注意,為了讓 Redis 集群工作正常,對每個節點: 1. 用於與客戶端通信的正常的客戶端通信端口(通常為 6379)需要開放給所有需要連接集群的客戶端 以及其他集群節點(使用客戶端端口來進行鍵遷移)。 2. 集群總線端口(客戶端端口加 10000)必須從所有的其他集群節點可達。 如果你不打開這兩個 TCP 端口,集群就無法正常工作。
Redis 集群的數據分片(Redis Cluster data sharding)
Redis 集群沒有使用一致性哈希,而是另外一種不同的分片形式,每個鍵概念上是被我們稱為哈希槽 (hash slot)的東西的一部分。 Redis 集群有 16384 個哈希槽,我們只是使用鍵的 CRC16 編碼對 16384 取模來計算一個指定鍵所屬的 哈希槽。 每一個 Redis 集群中的節點都承擔一個哈希槽的子集,例如,你可能有一個 3 個節點的集群,其中:

redis
節點 A 包含從 0 到 5500 的哈希槽。
節點 B 包含從 5501 到 11000 的哈希槽。
節點 C 包含從 11001 到 16384 的哈希槽。
這可以讓在集群中添加和移除節點非常容易。例如,如果我想添加一個新節點 D,我需要從節點 A,B, C 移動一些哈希槽到節點 D。同樣地,如果我想從集群中移除節點 A,我只需要移動 A 的哈希槽到 B 和 C。 當節點 A 變成空的以后,我就可以從集群中徹底刪除它。 因為從一個節點向另一個節點移動哈希槽並不需要停止操作,所以添加和移除節點,或者改變節點持有 的哈希槽百分比,都不需要任何停機時間(downtime)。
Redis cluster 架構(Redis Cluster Architecture)
redis-cluster 架構圖

redis
架構細節:
所有的 redis 節點彼此互聯(PING-PONG 機制),內部使用二進制協議優化傳輸速度和帶寬.
節點的 fail 是通過集群中超過半數的節點檢測失效時才生效.
客戶端與 redis 節點直連,不需要中間 proxy 層.客戶端不需要連接集群所有節點,連接集群中任何一個 可用節點即可
redis-cluster 把所有的物理節點映射到[0-16383]slot 上,cluster 負責維護 node<->slot<->value
redis-cluster 選舉:容錯

redis
選舉過程是集群中所有 master 參與,如果半數以上 master 節點與故障 master 節點通信超時(超過 cluster-node-timeout),認為當前 master 節點掛掉.
什么時候整個集群不可用(cluster_state:fail)
a:如果集群任意 master 掛掉,且當前 master 沒有 slave.集群進入 fail 狀態,也可以理解成集群的 slot 映 射[0-16383]不完成時進入 fail 狀態. ps : redis-3.0.0.rc1 加入 cluster-require-full-coverage 參數,默認關閉, 打開集群兼容部分失敗.
b:如果集群超過半數以上 master 掛掉,無論是否有 slave 集群進入 fail 狀態.
ps:當集群不可用時,所有對集群的操作做都不可用,收到((error) CLUSTERDOWN The cluster is down) 錯誤.
Redis 集群的主從模型(Redis Cluster master-slave model)
為了當部分節點失效時,或者無法與大多數節點通信時仍能保持可用,Redis 集群采用每個節點擁有 1(主 服務自身)到 N 個副本(N-1 個附加的從服務器)的主從模型。 在我們的例子中,集群擁有 A,B,C 三個節點,如果節點 B 失效集群將不能繼續服務,因為我們不再 有辦法來服務在 5501-11000 范圍內的哈希槽。 但是,如果當我們創建集群后(或者稍后),我們為每一個主服務器添加一個從服務器,這樣最終的集群 就由主服務器 A,B,C 和從服務器 A1,B1,C1 組成,如果 B 節點失效系統仍能繼續服務。 B1 節點復制 B 節點,於是集群會選舉 B1 節點作為新的主服務器,並繼續正確的運轉。

redis
Redis 集群的一致性保證(Redis Cluster consistency guarantees)
Redis 集群不保證強一致性。實踐中,這意味着在特定的條件下,Redis 集群可能會丟掉一些被系統收 到的寫入請求命令。
Redis 集群為什么會丟失寫請求的第一個原因,是因為采用了異步復制。這意味着在寫期間下面的事情 發生了:
你的客戶端向主服務器 B 寫入。
主服務器 B 回復 OK 給你的客戶端。
主服務器 B 傳播寫入操作到其從服務器 B1,B2 和 B3。
手動故障轉移(Manual failover)
有時候在主服務器事實上沒有任何故障的情況下強制一次故障轉移是很有用的。例如,為了升級主服務 器節點中的一個進程,可以對其進行故障轉移使其變為一個從服務器,這樣最小化了對可用性的影響。
Redis 集群支持使用 CLUSTER FAILOVER 命令來手動故障轉移,必須在你想進行故障轉移的主服務的 其中一個從服務器上執行。
手動故障轉移很特別,和真正因為主服務器失效而產生的故障轉移要更安全,因為采取了避免過程中數 據丟失的方式,僅當系統確認新的主服務器處理完了舊的主服務器的復制流時,客戶端才從原主服務器切 換到新主服務器。
添加新節點(Adding a new node)
添加一個新節點的過程基本上就是,添加一個空節點,然后,如果是作為主節點則移動一些數據進去, 如果是從節點則其作為某個節點的副本。
兩種情況我們都會討論,先從添加一個新的主服務器實例開始。
兩種情況下,第一步要完成的都是添加一個空節點。
我們使用與其他節點相同的配置(端口號除外)在 7006 端口(我們已存在的 6 個節點已經使用了從 7000 到 7005 的端口)上開啟一個新的節點,那么為了與我們之前的節點布局一致,你得這么做:
在你的終端程序中開啟一個新的標簽窗口。
進入 cluster-test 目錄。
創建一個名為 7006 的目錄。
在里面創建一個 redis.conf 的文件,類似於其它節點使用的文件,但是使用 7006 作為端口號。
最后使用../redis-server ./redis.conf 啟動服務器。
1
|
./redis-trib.rb
add-node127.0.0.1:7006127.0.0.1:7000
|
添加副本節點(Adding a new node as a replica)
添加一個新副本可以有兩種方式。顯而易見的一種方式是再次使用 redis-trib,但是要使用 --slave 選項, 像這樣:
1
|
./redis-trib.rb
add-node--slave127.0.0.1:7006127.0.0.1:7000
|
注意,這里的命令行完全像我們在添加一個新主服務器時使用的一樣,所以我們沒有指定要給哪個主服 務器添加副本。這種情況下,redis-trib 會添加一個新節點作為一個具有較少副本的隨機的主服務器的副本。
但是,你可以使用下面的命令行精確地指定你想要的主服務器作為副本的目標:
1
2
|
./redis-trib.rb
add-node--slave--master-id3c3a0c74aae0b56170ccb03a76b60cfe7dc1912e127. 0.0.1:7006127.0.0.1:7000
|
移除節點(Removing a node)
要移除一個從服務器節點,只要使用 redis-trib 的 del-node 命令就可以:
1
|
./redis-trib
del-node127.0.0.1:7000<node-id>
|
升級節點(Upgrading nodes in a Redis Cluster)
升級從服務器節點很簡單,因為你只需要停止節點然后用已更新的 Redis 版本重啟。如果有客戶端使用 從服務器節點分離讀請求,它們應該能夠在某個節點不可用時重新連接另一個從服務器。
升級主服務器要稍微復雜一些,建議的步驟是:
1. 使用 CLUSTER FAILOVER 來觸發一次手工故障轉移主服務器(請看本文檔的手工故障轉移小 節)。
2. 等待主服務器變為從服務器。
3. 像升級從服務器那樣升級這個節點。
4. 如果你想讓你剛剛升級的節點成為主服務器,觸發一次新的手工故障轉移,讓升級的節點重新變 回主服務器。
Redis 3.0 Cluster配置文檔
目錄
Redis 3.0概述
Redis 是一個高性能的key-value數據庫。 redis的出現,很大程度補償了
memcached這類keyvalue存儲的不足,在部分場合可以對關系數據庫起到很好的補充作用。它提供了Python,Ruby,Erlang,PHP客戶端,使用很方便。3.0版本加入cluster功能,解決了redis單點無法橫向擴展問題。
架構拓撲
3.0版本最大的特點就是支持cluster分布式橫向擴展,下面為3個Master節點以及3個slave節點的拓撲圖:
APP1
|
M1,M2,M3為redis三個主節點,S1,S2,S3為redis三個從節點,分別為M1,M2,M3備份數據以及故障切換使用。APP訪問數據庫可以通過連接任意一個Master節點實現。在三個Master節點的redis集群中,只容許有一個Master出故障,當多於一個Master宕機時,redis即不可用。當其中一個Master出現故障,其對應的Slave會接管故障Master的服務,保證redis 數據庫的正常使用。
准備階段
(1) 安裝包
http://redis.io
(2) 系統包
安裝gcc:yum install gcc
安裝zlib:yum install zlib
安裝ruby:yum install ruby
安裝rubygems:yum install rubygems
安裝gem redis:(下載:
http://rubygems.org/gems/redis/versions/3.0.7)
# gem install -l /tmp/redis-3.0.7.gem
Successfully installed redis-3.0.7
1 gem installed
Installing ri documentation for redis-3.0.7...
Installing RDoc documentation for redis-3.0.7...
(3) 系統參數
修改open files:# ulimit -n 10032 (默認1024) #可以打開最大文件描述符的數量,ulimit 用於限制 shell 啟動進程所占用的資源
添加vm.overcommit_memory=1:
#vi /etc/sysctl.conf
#sysctl vm.overcommit_memory=1 #參數vm.overcommit_memory控制着linux的內存分配策略,1 表示一直允許overcommit(過量使用),可以避免數據被截斷
關閉hugepage:# echo never > /sys/kernel/mm/transparent_hugepage/enabled #
取消對透明巨頁內存(transparent huge pages)的支持,因為這會造成 redis 使用過程產生延時和內存訪問問題
修改somaxconn :# echo 511 >/proc/sys/net/core/somaxconn #限制了接收新 TCP 連接偵聽隊列的大小,對於一個經常處理新連接的高負載 web服務環境來說,默認的 128 太小了。大多數環境這個值建議增加到 1024 或者更多。
關閉防火牆:# service iptables stop
關閉selinux:# vi /etc/sysconfig/selinux 修改“SELINUX=disabled”
安裝Cluster
(1) 安裝軟件
# cd /redis/redis-3.0.0
# make
# make install
(2) 拷貝bin文件
# cp /redis/redis-3.0.0/src/redis-trib.rb /usr/local/bin/
# cp redis-cli /usr/local/bin/
# cp redis-server /usr/local/bin/
# which redis-trib.rb
/usr/local/bin/redis-trib.rb
(3) 配置通用config文件redis-common.conf
# vi /redis/redis-3.0.0/config/redis-common.conf
代碼如下:
|
#GENERAL
daemonize yes
tcp-backlog 511
timeout 0
tcp-keepalive 0
loglevel notice
databases 16
dir /redis/redis-3.0.0/data
slave-serve-stale-data yes
slave-read-only yes
#not use default
repl-disable-tcp-nodelay yes
slave-priority 100
appendonly yes
appendfsync everysec
no-appendfsync-on-rewrite yes
auto-aof-rewrite-min-size 64mb
lua-time-limit 5000
cluster-enabled yes
cluster-node-timeout 15000
cluster-migration-barrier 1
slowlog-log-slower-than 10000
slowlog-max-len 128
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-entries 512
list-max-ziplist-value 64
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
|
(4) 節點1配置文件redis-6379.conf
# vi /redis/redis-3.0.0/config/redis-6379.conf
代碼如下:
|
include /redis/redis-3.0.0/config/redis-common.conf
port 6379
logfile "/redis/redis-3.0.0/log/redis-6379.log"
maxmemory 100m
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
maxmemory-policy allkeys-lru
appendfilename "appendonly-6379.aof"
dbfilename dump-6379.rdb
#dir /redis/redis-3.0.0/data
cluster-config-file nodes-6379.conf
auto-aof-rewrite-percentage 80-100
|
(5) 節點2配置文件redis-6389.conf
# vi /redis/redis-3.0.0/config/redis-6389.conf
代碼如下:
|
include /redis/redis-3.0.0/config/redis-common.conf
port 6389
logfile "/redis/redis-3.0.0/log/redis-6389.log"
maxmemory 100m
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
maxmemory-policy allkeys-lru
appendfilename "appendonly-6389.aof"
dbfilename dump-6389.rdb
cluster-config-file nodes-6389.conf
auto-aof-rewrite-percentage 80-100
|
(6) 節點3配置文件redis-6399.conf
# vi /redis/redis-3.0.0/config/redis-6399.conf
代碼如下:
|
include /redis/redis-3.0.0/config/redis-common.conf
port 6399
logfile "/redis/redis-3.0.0/log/redis-6399.log"
maxmemory 100m
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
maxmemory-policy allkeys-lru
appendfilename "appendonly-6399.aof"
dbfilename dump-6399.rdb
cluster-config-file nodes-6399.conf
auto-aof-rewrite-percentage 80-100
|
(7) 啟動redis節點
# redis-server redis-6379.conf
# redis-server redis-6389.conf
# redis-server redis-6399.conf
# redis-server redis-7379.conf
# redis-server redis-7389.conf
# redis-server redis-7399.conf
# ps -ef| grep redis
root 4623 1 0 11:07 ? 00:00:00 redis-server *:6379 [cluster]
root 4627 1 0 11:07 ? 00:00:00 redis-server *:6389 [cluster]
root 4631 1 0 11:07 ? 00:00:00 redis-server *:6399 [cluster]
root 4654 1 0 11:30 ? 00:00:00 redis-server *:7379 [cluster]
root 4658 1 0 11:30 ? 00:00:00 redis-server *:7389 [cluster]
root 4662 1 0 11:30 ? 00:00:00 redis-server *:7399 [cluster]
指定主從
redis-trib.rb create --replicas 1 172.31.103.211:6379 172.31.103.210:6379 172.31.103.209:6379 172.31.103.211:6389 172.31.103.210:6389 172.31.103.209:6389 前三個為主節點,后三個為從節點
(8) 通過redis-trib創建cluster
#--replicas 則指定了為Redis Cluster中的每個Master節點配備幾個Slave節點
# redis-trib.rb create --replicas 1 10.27.17.115:6379 10.27.17.115:6389 10.30.191.77:6379 10.30.191.77:6389 10.27.16.140:6379 10.27.16.140:6389
>>> Creating cluster
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
192.168.3.88:6379
192.168.3.88:6389
192.168.3.88:6399
Adding replica 192.168.3.88:7379 to 192.168.3.88:6379
Adding replica 192.168.3.88:7389 to 192.168.3.88:6389
Adding replica 192.168.3.88:7399 to 192.168.3.88:6399
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join......
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
M: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) master
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
M: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) master
replicates d1d124d35c848e9c8e726b59af669c9196557869
M: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) master
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
(9) 檢查集群狀態
連接任意節點,執行redis-trib.rb
# redis-trib.rb check 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
管理cluster
(1) 添加master節點
添加的master節點配置在另一個服務器上,首先配置config文件
# vi /redis/redis-3.0.0/config/redis-6379.conf
# vi /redis/redis-3.0.0/config/redis-7379.conf
使用redis-trib.rb添加節點
在已有集群服務器(192.168.3.88)上執行
(注意:add-node的使用方法為new_host:new_port existing_host:existing_port,前面是新添加的節點信息,后面是已存在的節點信息)
# redis-trib.rb add-node 192.168.3.61:6379 192.168.3.88:6379
>>> Adding node 192.168.3.61:6379 to cluster 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Connecting to node 192.168.3.61:6379: OK
>>> Send CLUSTER MEET to node 192.168.3.61:6379 to make it join the cluster.
[OK] New node added correctly.
選擇其中一個節點,檢查集群狀態,發現剛添加的節點已經在集群中了,角色是master,但是並沒有slot分配到新加的節點上,后面要通過reshard命令分配slot。
# redis-trib.rb check 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots: (0 slots) master
0 additional replica(s)
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
主節點添加完畢后,需要給該主節點添加一個slave節點,添加過程和主節點一致,添加完成后需要在redis中進行設置。
# redis-trib.rb add-node 192.168.3.61:7379 192.168.3.88:6379
顯示內容省略
連接到要添加的slave數據庫中,執行replicate操作。后面的ID為Master 192.168.3.61:6379的ID,通過redis-trib.rb check可以看到。
# redis-cli -c -h 192.168.3.61 -p 7379
192.168.3.61:7379> cluster replicate 89be535ff56586dcec56f14122add80d89a57bb3
OK
根據check結果,可以看到新添加的slave已經成功和Master建立聯系。
# redis-trib.rb check 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots: (0 slots) master
1 additional replica(s)
S: 92017f0258675b02a7799726339efabf7d005f8c 192.168.3.61:7379
slots: (0 slots) slave
replicates 89be535ff56586dcec56f14122add80d89a57bb3
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
(2) 數據分片
加入新的節點后,需要將其他的節點中的hash slot移動到新的節點中,以達到負載均衡的效果,指定集群中其中一個節點的地址
# redis-trib.rb reshard 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:5461-10922 (5462 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots: (0 slots) master
1 additional replica(s)
S: 92017f0258675b02a7799726339efabf7d005f8c 192.168.3.61:7379
slots: (0 slots) slave
replicates 89be535ff56586dcec56f14122add80d89a57bb3
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)?4096 --16384/4=4096.master數量建議為奇數
What is the receiving node ID? 89be535ff56586dcec56f14122add80d89a57bb3 --新加的主節點ID
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:all --從所有其他master節點均勻把slot移動到新加的主節點
.....
.....
Moving slot 12284 from 192.168.3.88:6399 to 192.168.3.61:6379:
Moving slot 12285 from 192.168.3.88:6399 to 192.168.3.61:6379:
Moving slot 12286 from 192.168.3.88:6399 to 192.168.3.61:6379:
Moving slot 12287 from 192.168.3.88:6399 to 192.168.3.61:6379:
再次check,發現所有主節點的slot都變成4096了
# redis-trib.rb check 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:1365-5460 (4096 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:6827-10922 (4096 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots:0-1364,5461-6826,10923-12287 (4096 slots) master
1 additional replica(s)
S: 92017f0258675b02a7799726339efabf7d005f8c 192.168.3.61:7379
slots: (0 slots) slave
replicates 89be535ff56586dcec56f14122add80d89a57bb3
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots:12288-16383 (4096 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
(3) 刪除節點
刪除主節點之前,需要先將slot遷移到其他主節點上
# redis-trib.rb reshard 192.168.3.88:6379
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:7379: OK
>>> Performing Cluster Check (using node 192.168.3.88:6379)
M: 05fe758161e2cbe23240697f47f1cd2c937a675b 192.168.3.88:6379
slots:1365-5460,12288-16383 (8192 slots) master
1 additional replica(s)
S: 98dae5126228dea54d1321eeb357d8773bd2ee11 192.168.3.88:7389
slots: (0 slots) slave
replicates d1d124d35c848e9c8e726b59af669c9196557869
S: d013aee7cae8163f787cb6445778ff97bf66ce17 192.168.3.88:7399
slots: (0 slots) slave
replicates d64223d6695fcc7e1030f219f09d7488c438cf39
M: d1d124d35c848e9c8e726b59af669c9196557869 192.168.3.88:6389
slots:6827-10922 (4096 slots) master
1 additional replica(s)
M: 89be535ff56586dcec56f14122add80d89a57bb3 192.168.3.61:6379
slots:0-1364,5461-6826,10923-12287 (4096 slots) master
1 additional replica(s)
S: 92017f0258675b02a7799726339efabf7d005f8c 192.168.3.61:7379
slots: (0 slots) slave
replicates 89be535ff56586dcec56f14122add80d89a57bb3
M: d64223d6695fcc7e1030f219f09d7488c438cf39 192.168.3.88:6399
slots: (0 slots) master
1 additional replica(s)
S: 7f77ec03e40d0cc9f343d783a293ae8aa6c6e090 192.168.3.88:7379
slots: (0 slots) slave
replicates 05fe758161e2cbe23240697f47f1cd2c937a675b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4906
What is the receiving node ID? 89be535ff56586dcec56f14122add80d89a57bb3
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:d1d124d35c848e9c8e726b59af669c9196557869
Source node #2:done
……
……
Moving slot 10920 from d1d124d35c848e9c8e726b59af669c9196557869
Moving slot 10921 from d1d124d35c848e9c8e726b59af669c9196557869
Moving slot 10922 from d1d124d35c848e9c8e726b59af669c9196557869
Do you want to proceed with the proposed reshard plan (yes/no)?yes
……
……
Moving slot 10920 from 192.168.3.88:6389 to 192.168.3.61:6379:
Moving slot 10921 from 192.168.3.88:6389 to 192.168.3.61:6379:
Moving slot 10922 from 192.168.3.88:6389 to 192.168.3.61:6379:
檢查節點的slot是否完全遷移走,完成后就可以刪除節點了
# redis-trib.rb check 192.168.3.88:6399
# redis-trib.rb del-node 192.168.3.88:6399 d64223d6695fcc7e1030f219f09d7488c438cf39
>>> Removing node d64223d6695fcc7e1030f219f09d7488c438cf39 from cluster 192.168.3.88:6399
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7379: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:7399: OK
>>> Sending CLUSTER FORGET messages to the cluster...
>>> 192.168.3.88:7399 as replica of 192.168.3.88:6399
/usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis/client.rb:97:in `call': ERR Can't forget my master!(Redis::CommandError)
from /usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis.rb:2432:in `method_missing'
from /usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis.rb:37:in `synchronize'
from /usr/lib/ruby/1.8/monitor.rb:242:in `mon_synchronize'
from /usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis.rb:37:in `synchronize'
from /usr/lib/ruby/gems/1.8/gems/redis-3.0.7/lib/redis.rb:2431:in `method_missing'
from /usr/local/bin/redis-trib.rb:1086:in `delnode_cluster_cmd'
from /usr/local/bin/redis-trib.rb:1078:in `each'
from /usr/local/bin/redis-trib.rb:1078:in `delnode_cluster_cmd'
from /usr/local/bin/redis-trib.rb:1373:in `send'
from /usr/local/bin/redis-trib.rb:1373
刪除主節點之前,需要先刪除主節點的slave節點,否則會報如上錯誤
# redis-trib.rb del-node 192.168.3.88:7399 d013aee7cae8163f787cb6445778ff97bf66ce17
>>> Removing node d013aee7cae8163f787cb6445778ff97bf66ce17 from cluster 192.168.3.88:7399
Connecting to node 192.168.3.88:7399: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.61:7379: OK
Connecting to node 192.168.3.88:7379: OK
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:6389: OK
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
# redis-trib.rb del-node 192.168.3.88:6399 d64223d6695fcc7e1030f219f09d7488c438cf39
>>> Removing node d64223d6695fcc7e1030f219f09d7488c438cf39 from cluster 192.168.3.88:6399
Connecting to node 192.168.3.88:6399: OK
Connecting to node 192.168.3.61:6379: OK
Connecting to node 192.168.3.88:6379: OK
Connecting to node 192.168.3.88:7389: OK
Connecting to node 192.168.3.88:7379: OK
Connecting to node 192.168.3.88:6389: OK
Connecting to node 192.168.3.61:7379: OK
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.