redis-shake: a real-time Redis synchronization tool


Environment:

OS: CentOS 7

1. Download the installation package
redis-shake-1.6.19.tar.gz

2. Extract the package
tar -xvf redis-shake-1.6.19.tar.gz
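If the archive is not already on the host, it can be fetched from the RedisShake releases page first. The sketch below is only illustrative: the exact download URL and asset name depend on the release you pick, and it assumes the archive unpacks into /opt/redis-shake to match the start command in step 4.

# illustrative download URL; check https://github.com/alibaba/RedisShake/releases for the actual asset name
wget https://github.com/alibaba/RedisShake/releases/download/release-v1.6.19/redis-shake-1.6.19.tar.gz
tar -xvf redis-shake-1.6.19.tar.gz -C /opt/        # assumed to unpack to /opt/redis-shake, matching step 4
ls /opt/redis-shake                                # expect redis-shake.linux and redis-shake.conf among others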

3. Edit the configuration file

[root@localhost redis-shake]# more redis-shake.conf
# this is the configuration of redis-shake.
# if you have any problem, please visit https://github.com/alibaba/RedisShake/wiki/FAQ

# id
id = redis-shake

# log file. if not set, logs are printed to stdout (e.g. /var/log/redis-shake.log)
log.file =
# log level: "none", "error", "warn", "info", "debug", "all". default is "info". "debug" == "all"
log.level = info
# pid path, the directory where the pid file is stored (e.g. /var/run/). if not set, it defaults to the working directory.
# note that this is a directory; the actual pid file is `{pid_path}/{id}.pid`
pid_path =

# pprof port.
system_profile = 9310
# restful port used to expose metrics. set -1 to disable. in `restore` mode RedisShake exits once the RDB
# has been restored only if this value is -1; otherwise it blocks forever.
http_profile = 9320

# number of parallel routines started to sync one RDB file. default is 64.
parallel = 32

# source redis configuration.
# used in `dump`, `sync` and `rump`.
# source redis type, e.g. "standalone" (default), "sentinel" or "cluster".
#   1. "standalone": standalone db mode.
#   2. "sentinel": the redis address is read from sentinel.
#   3. "cluster": the source redis has several db.
#   4. "proxy": the proxy address, currently, only used in "rump" mode.
# the source redis type: standalone, sentinel, cluster and proxy are supported. note: proxy is currently only used in rump mode.
source.type = cluster
# ip:port
# the source address can be the following:
#   1. single db address. for "standalone" type.
#   2. ${sentinel_master_name}:${master or slave}@sentinel single/cluster address, e.g., mymaster:master@127.0.0.1:26379;127.0.0.1:26380, or @127.0.0.1:26379;127.0.0.1:26380. for "sentinel" type.
#   3. cluster that has several db nodes split by semicolon(;). for "cluster" type. e.g., 10.1.1.1:20331;10.1.1.2:20441.
#   4. proxy address(used in "rump" mode only). for "proxy" type.
# source redis address. for sentinel mode the format is "master name:role to pull (master or slave)@sentinel address";
# for other cluster architectures such as codis, twemproxy or aliyun proxy, configure the db addresses of all masters or slaves.
source.address = 192.168.1.134:7001;192.168.1.134:7002;192.168.1.134:7003;192.168.1.134:8001;192.168.1.134:8002;192.168.1.134:8003
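# note (not part of the stock config): one way to enumerate the node addresses for the source.address
# line above is to ask any cluster node, assuming redis-cli is available on this host:
#   redis-cli -h 192.168.1.134 -p 7001 -a oracleabc2018 cluster nodes | awk '{print $2}' | cut -d'@' -f1 | paste -sd';' -
# the last `paste` joins the addresses with ';' in the format expected here.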
# password of db/proxy. even if type is sentinel.
source.password_raw =oracleabc2018
# auth type, don't modify it
source.auth_type = auth
# tls enable, true or false. Currently, only support standalone.
# open source redis does NOT support tls so far, but some cloud versions do.
source.tls_enable = false
# input RDB file.
# used in `decode` and `restore`.
# if the input is list split by semicolon(;), redis-shake will restore the list one by one.
# for decode or restore, this parameter is the rdb file(s) to read. a semicolon-separated list is supported,
# e.g. rdb.0;rdb.1;rdb.2, and redis-shake will restore them one by one.
source.rdb.input = local
# the concurrence of RDB syncing, default is len(source.address) or len(source.rdb.input).
# used in `dump`, `sync` and `restore`. 0 means default.
# this option is useless when source.type isn't cluster or there is only one input RDB.
# concurrency of the RDB pull. for `dump` or `sync` it defaults to the number of dbs in source.address;
# for `restore` it defaults to len(source.rdb.input). e.g. with 5 db nodes / input rdbs and rdb.parallel = 3,
# only 3 full dumps are pulled concurrently at first; the 4th db node's rdb is pulled only after one of them
# finishes its RDB and enters the incremental stage, and so on. in the end there will be len(source.address)
# or len(rdb.input) incremental threads running at the same time.
source.rdb.parallel = 0
# for special cloud vendor: ucloud
# used in `decode` and `restore`.
# the rdb files of the ucloud cluster edition carry a slot prefix that has to be specially stripped; set this to ucloud_cluster in that case.
source.rdb.special_cloud =

# target redis configuration. used in `restore`, `sync` and `rump`.
# the type of target redis can be "standalone", "proxy" or "cluster".
#   1. "standalone": standalone db mode.
#   2. "sentinel": the redis address is read from sentinel.
#   3. "cluster": open source cluster (not supported currently).
#   4. "proxy": proxy layer ahead redis. Data will be inserted in a round-robin way if more than 1 proxy given.
# the target redis type: standalone, sentinel, cluster and proxy are supported.
target.type = cluster
# ip:port
# the target address can be the following:
#   1. single db address. for "standalone" type.
#   2. ${sentinel_master_name}:${master or slave}@sentinel single/cluster address, e.g., mymaster:master@127.0.0.1:26379;127.0.0.1:26380, or @127.0.0.1:26379;127.0.0.1:26380. for "sentinel" type.
#   3. cluster that has several db nodes split by semicolon(;). for "cluster" type.
#   4. proxy address(used in "rump" mode only). for "proxy" type.
target.address = 192.168.1.135:7001;192.168.1.135:7002;192.168.1.135:7003;192.168.1.135:8001;192.168.1.135:8002;192.168.1.135:8003
# password of db/proxy. even if type is sentinel.
target.password_raw =oracleabc2018
# auth type, don't modify it
target.auth_type = auth
# all the data will be written into this db. < 0 means disable.
target.db = -1
# tls enable, true or false. Currently, only support standalone.
# open source redis does NOT support tls so far, but some cloud versions do.
target.tls_enable = false
# output RDB file prefix.
# used in `decode` and `dump`.
# for decode or dump, this parameter is the prefix of the output rdb files. e.g. with 3 input dbs the dumps are:
# ${output_rdb}.0, ${output_rdb}.1, ${output_rdb}.2
target.rdb.output = local_dump
# some redis proxies like twemproxy don't support fetching the version, so please set it here.
# e.g., target.version = 4.0
target.version =

# used for expired keys: when the timestamps of the source and the target are not the same, the target side
# needs to add this time gap.
fake_time =

# force rewrite when the destination already has the key, i.e. whether to overwrite duplicate keys.
# used in `restore`, `sync` and `rump`.
rewrite = true

# filter db, key, slot, lua.
# filter db.
# used in `restore`, `sync` and `rump`.
# e.g., "0;5;10" means match db0, db5 and db10.
# at most one of `filter.db.whitelist` and `filter.db.blacklist` parameters can be given.
# if the filter.db.whitelist is not empty, the given db list will be passed while others filtered.
# if the filter.db.blacklist is not empty, the given db list will be filtered while others passed.
# all dbs will be passed if no condition given.
# the listed dbs pass, e.g. 0;5;10 lets db0, db5 and db10 through while the others are filtered
filter.db.whitelist =
# the listed dbs are filtered, e.g. 0;5;10 filters db0, db5 and db10 while the others pass
filter.db.blacklist =
# filter key with prefix string. multiple keys are separated by ';'.
# e.g., "abc;bzz" match let "abc", "abc1", "abcxxx", "bzz" and "bzzwww".
# used in `restore`, `sync` and `rump`.
# at most one of `filter.key.whitelist` and `filter.key.blacklist` parameters can be given.
# if the filter.key.whitelist is not empty, the given keys will be passed while others filtered.
# if the filter.key.blacklist is not empty, the given keys will be filtered while others passed.
# all the namespace will be passed if no condition given.
# filter keys by prefix: only keys with the given prefixes pass, separated by ';'. e.g. abc lets abc, abc1 and abcxxx through
filter.key.whitelist =
# filter keys by prefix: keys with the given prefixes are blocked, separated by ';'. e.g. abc blocks abc, abc1 and abcxxx
filter.key.blacklist =
# filter given slot, multiple slots are separated by ';'.
# e.g., 1;2;3
# used in `sync`.
# filter by slot: only the given slots pass
filter.slot =
# filter lua script. true means not pass. However, in redis 5.0, the lua
# converts to transaction(multi+{commands}+exec) which will be passed.
# controls whether lua scripts are blocked; true means they do not pass
filter.lua = false

# big key threshold, the default is 500 * 1024 * 1024 bytes. If the value is bigger than
# this given value, all the fields will be split and written into the target in order. If
# the target Redis type is Codis, this should be set to 1; please check the FAQ for the reason.
# normally a key that is not too big is written to the target with a single restore call; if the value of a
# key exceeds this threshold, it is split and written in batches, piece by piece. if the target is Codis this
# must be set to 1, see the FAQ for the reason.
big_key_threshold = 524288000

# use psync command.
# used in `sync`.
# the sync command is used by default; enabling this uses the psync command instead
psync = false

# enable metric
# used in `sync`.
metric = true
# whether to print metrics to the log
metric.print_log = false

# sender information.
# sender flush buffer size of byte.
# used in `sync`.
# size in bytes of the send buffer; once it exceeds this threshold the buffer is forcibly flushed and sent
sender.size = 104857600
# sender flush buffer size of oplog number.
# used in `sync`. flush sender buffer when bigger than this threshold.
# number of oplogs in the send buffer; once it exceeds this threshold the buffer is forcibly flushed and sent.
# when the target is a cluster, increasing this value consumes extra memory.
sender.count = 4096
# delay channel size. once an oplog is sent to the target redis, the oplog id and timestamp are also
# stored in this delay queue. the timestamp is used to calculate the time delay when the ack is
# received from the target redis.
# used in `sync`.
# the queue used by the metric module to compute the delay
sender.delay_channel_size = 65535

# enable the keep_alive option on the TCP connection to redis.
# the unit is second; 0 means disable.
keep_alive = 0

# used in `rump`.
# number of keys captured per scan. default is 100 if not set.
scan.key_number = 50
# used in `rump`.
# we support some special redis types that don't use default `scan` command like alibaba cloud and tencent cloud.
# some editions use a special format that differs from the normal scan command, and special adaptations are provided.
# currently the Tencent Cloud cluster edition ("tencent_cluster") and the Alibaba Cloud cluster edition ("aliyun_cluster") are supported.
scan.special_cloud =
# used in `rump`.
# we also support fetching data from a given file that contains the key list, one key per line.
# some cloud editions support neither sync/psync nor scan; in that case all keys are read from this file and fetched.
scan.key_file =

# limit the rate of transmission. Only used in `rump` currently.
# e.g., qps = 1000 means pass 1000 keys per second. default is 500,000 (0 means use the default).
qps = 200000

# ----------------splitter----------------
# below variables are useless for current open source version so don't set.

# replace hash tag.
# used in `sync`.
replace_hash_tag = false
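With the configuration in place, it can help to confirm that both clusters are reachable with the passwords configured above before starting the sync. The commands below are a minimal sketch using redis-cli, which is not part of redis-shake and is assumed to be installed on this host:

redis-cli -h 192.168.1.134 -p 7001 -a oracleabc2018 -c ping                    # source cluster node, expect PONG
redis-cli -h 192.168.1.135 -p 7001 -a oracleabc2018 -c ping                    # target cluster node, expect PONG
redis-cli -h 192.168.1.134 -p 7001 -a oracleabc2018 cluster info | head -1     # expect cluster_state:ok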

4. Start
/opt/redis-shake/redis-shake.linux -type=sync -conf=/opt/redis-shake/redis-shake.conf
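In practice the process is usually left running in the background and then watched through its log output and the restful port configured above (http_profile = 9320). The sketch below makes a few assumptions: nohup and curl are available, and the /metric path of the restful interface matches your RedisShake version (check the project wiki if it does not):

cd /opt/redis-shake
nohup ./redis-shake.linux -type=sync -conf=./redis-shake.conf > redis-shake.out 2>&1 &
tail -f redis-shake.out                                        # watch for the full sync to finish and the incremental stage to begin
curl http://127.0.0.1:9320/metric                              # assumed metrics endpoint exposed via http_profile
redis-cli -h 192.168.1.135 -p 7001 -a oracleabc2018 dbsize     # spot-check the key count on one target node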

