2018-01-24 08:19:55
Installing a Redis Cluster on Linux
Environment:
OS: Red Hat Linux AS5
Redis 4.0.6
Topology: 3 masters and 3 slaves
No. | Master              | Slave
1   | 192.168.56.91:6379  | 192.168.56.91:7379
2   | 192.168.56.92:6379  | 192.168.56.92:7379
3   | 192.168.56.93:6379  | 192.168.56.93:7379
1. Installation steps
Download Redis from the official Redis website.
Choose the version that suits your needs; the version downloaded here is redis-4.0.6.tar.gz.
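For example, it can be fetched from the command line (a sketch, assuming the standard redis.io release URL):
wget http://download.redis.io/releases/redis-4.0.6.tar.gz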
Perform the following installation steps on every machine:
[root@pxc01 soft]# tar -zxvf redis-4.0.6.tar.gz
[root@pxc01 soft]# cp -R ./redis-4.0.6 /opt/
[root@pxc01 opt]# cd redis-4.0.6
[root@pxc01 redis-4.0.6]# make
[root@pxc01 redis-4.0.6]# make test
If everything is fine, the tests end with the following output:
\o/ All tests passed without errors!
Cleanup: may take some time... OK
make[1]: Leaving directory `/opt/redis-4.0.6/src'
Continue the installation:
[root@pxc01 redis-4.0.6]# cd src
[root@pxc01 src]# make install
[root@pxc01 opt]# cd redis-4.0.6/
[root@pxc01 redis-4.0.6]# mkdir conf ## directory for configuration files
[root@pxc01 redis-4.0.6]# mkdir log ## directory for log files
[root@pxc01 redis-4.0.6]# mkdir run ## directory for runtime pid files
[root@pxc01 redis-4.0.6]# mkdir rdb ## directory for RDB snapshots and AOF files
Create the master configuration file /opt/redis-4.0.6/conf/6379.conf with the following content:
daemonize yes
pidfile /opt/redis-4.0.6/run/redis-6379.pid
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 0
loglevel notice
logfile "/opt/redis-4.0.6/log/redis-6379.log"
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump-6379.rdb
dir /opt/redis-4.0.6/rdb
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
appendonly yes
appendfilename "redis-6379.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
cluster-enabled yes
cluster-config-file run_6379.conf
cluster-node-timeout 15000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-entries 512
list-max-ziplist-value 64
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
# Generated by CONFIG REWRITE
#masterauth "richinfo123"
#requirepass "richinfo123"
protected-mode no
For the slave instance on port 7379, take the master's configuration file as a template and simply change every occurrence of 6379 to 7379.
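A quick way to generate it (a sketch, assuming the master configuration was saved as /opt/redis-4.0.6/conf/6379.conf):
sed 's/6379/7379/g' /opt/redis-4.0.6/conf/6379.conf > /opt/redis-4.0.6/conf/7379.conf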
Start the Redis instances installed on each machine:
192.168.56.91:
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/6379.conf
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/7379.conf
192.168.56.92:
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/6379.conf
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/7379.conf
192.168.56.93:
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/6379.conf
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/7379.conf
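To confirm that both instances are up on a node, a quick check (a sketch):
ps -ef | grep redis-server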
Create the cluster with redis-trib.rb (run from any one of the nodes):
/opt/redis-4.0.6/src/redis-trib.rb create --replicas 1 192.168.56.91:6379 192.168.56.91:7379 192.168.56.92:6379 192.168.56.92:7379 192.168.56.93:6379 192.168.56.93:7379
The --replicas 1 option tells redis-trib to create one slave for every master in the cluster.
[root@pxc01 soft]# /opt/redis-4.0.6/src/redis-trib.rb create --replicas 1 192.168.56.91:6379 192.168.56.91:7379 192.168.56.92:6379 192.168.56.92:7379 192.168.56.93:6379 192.168.56.93:7379
>>> Creating cluster
>>> Performing hash slots allocation on 6 nodes...
Using 3 masters:
192.168.56.91:6379
192.168.56.92:6379
192.168.56.93:6379
Adding replica 192.168.56.92:7379 to 192.168.56.91:6379
Adding replica 192.168.56.91:7379 to 192.168.56.92:6379
Adding replica 192.168.56.93:7379 to 192.168.56.93:6379
M: def4ff42e3c7759538d0273cc64d9a17cdd129b4 192.168.56.91:6379
slots:0-5460 (5461 slots) master
S: 3b53f11a7aaa49b89df6297062fd4e9581944cfa 192.168.56.91:7379
replicates 9fb8db06a065d3c73b226b6499de167b0f078c1a
M: 9fb8db06a065d3c73b226b6499de167b0f078c1a 192.168.56.92:6379
slots:5461-10922 (5462 slots) master
S: f4be871dd3b25f1e1ec2de0df5f281b72bc54af3 192.168.56.92:7379
replicates def4ff42e3c7759538d0273cc64d9a17cdd129b4
M: 0aedad523787113cdbccc449f7651219ffb336fd 192.168.56.93:6379
slots:10923-16383 (5461 slots) master
S: a6e3625a31809a7cde6de1371f00b606c24966cc 192.168.56.93:7379
replicates 0aedad523787113cdbccc449f7651219ffb336fd
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join...
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: def4ff42e3c7759538d0273cc64d9a17cdd129b4 192.168.56.91:6379
slots:0-5460 (5461 slots) master
1 additional replica(s)
M: 9fb8db06a065d3c73b226b6499de167b0f078c1a 192.168.56.92:6379
slots:5461-10922 (5462 slots) master
1 additional replica(s)
S: f4be871dd3b25f1e1ec2de0df5f281b72bc54af3 192.168.56.92:7379
slots: (0 slots) slave
replicates def4ff42e3c7759538d0273cc64d9a17cdd129b4
S: 3b53f11a7aaa49b89df6297062fd4e9581944cfa 192.168.56.91:7379
slots: (0 slots) slave
replicates 9fb8db06a065d3c73b226b6499de167b0f078c1a
M: 0aedad523787113cdbccc449f7651219ffb336fd 192.168.56.93:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: a6e3625a31809a7cde6de1371f00b606c24966cc 192.168.56.93:7379
slots: (0 slots) slave
replicates 0aedad523787113cdbccc449f7651219ffb336fd
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Log in to the cluster:
./redis-cli -c -h 192.168.56.91 -p 6379
Check the cluster information:
192.168.56.91:6379> cluster nodes
9fb8db06a065d3c73b226b6499de167b0f078c1a 192.168.56.92:6379@16379 master - 0 1515653505000 3 connected 5461-10922
f4be871dd3b25f1e1ec2de0df5f281b72bc54af3 192.168.56.92:7379@17379 slave def4ff42e3c7759538d0273cc64d9a17cdd129b4 0 1515653505399 4 connected
3b53f11a7aaa49b89df6297062fd4e9581944cfa 192.168.56.91:7379@17379 slave 9fb8db06a065d3c73b226b6499de167b0f078c1a 0 1515653504392 3 connected
0aedad523787113cdbccc449f7651219ffb336fd 192.168.56.93:6379@16379 master - 0 1515653505000 5 connected 10923-16383
a6e3625a31809a7cde6de1371f00b606c24966cc 192.168.56.93:7379@17379 slave 0aedad523787113cdbccc449f7651219ffb336fd 0 1515653505000 6 connected
def4ff42e3c7759538d0273cc64d9a17cdd129b4 192.168.56.91:6379@16379 myself,master - 0 1515653504000 1 connected 0-5460
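As a quick sanity check that requests are redirected across the cluster, set and read back a key (a sketch; the key name foo and value bar are arbitrary, and the -c option makes redis-cli follow MOVED redirections automatically):
./redis-cli -c -h 192.168.56.91 -p 6379 set foo bar
./redis-cli -c -h 192.168.56.91 -p 6379 get foo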
Quite often a single Redis instance that has been running for a while needs to be upgraded to a cluster because of business growth or company requirements. How do we move its data into the cluster? The usual approach is to build the cluster and assign all slots to one master node, copy the single instance's snapshot (RDB) or AOF file to that cluster node so that all of the data lands in that master's 16384 slots, then start the remaining cluster nodes, redistribute the 16384 slots (and the data in them) to the other nodes, and finally create a slave for each master.
Database size: the single instance holds 300,000 keys; after the upgrade the cluster should hold the same number, which confirms that the migration succeeded.
[root@pxc01 src]# ./redis-cli -h 192.168.56.91 -p 8379
192.168.56.91:8379> dbsize
(integer) 300000
If the single instance has both RDB and AOF enabled, the AOF file alone is sufficient, because when both exist Redis loads the AOF file first.
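To confirm that AOF is enabled on the single instance, a quick check (a sketch; port 8379 is the single instance used here):
./redis-cli -h 192.168.56.91 -p 8379 config get appendonly
Then rewrite the AOF so that it holds a compact, up-to-date copy of the dataset: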
192.168.56.91:8379> BGREWRITEAOF
Background append only file rewriting started
Then go to the data directory and check the AOF file:
[root@pxc01 rdb]# ls -al
total 15640
drwxr-xr-x. 2 root root 70 Jan 22 17:25 .
drwxrwxr-x. 10 root root 4096 Jan 18 16:40 ..
-rw-r--r--. 1 root root 4469779 Jan 22 16:55 dump-8379.rdb
-rw-r--r--. 1 root root 11533363 Jan 22 17:25 redis-8379.aof
-rw-r--r--. 1 root root 114 Jan 18 16:54 run_8379.conf
Start Redis on each of the three nodes:
192.168.56.91
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/6379.conf
192.168.56.92
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/6379.conf
192.168.56.93
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/6379.conf
At the moment the cluster contains only one node:
[root@pxc01 src]# ./redis-trib.rb check 192.168.56.91:6379
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: def4ff42e3c7759538d0273cc64d9a17cdd129b4 192.168.56.91:6379
slots:0-16383 (16384 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Create a cluster from the three master nodes:
./redis-trib.rb create 192.168.56.91:6379 192.168.56.92:6379 192.168.56.93:6379
[root@pxc01 src]# ./redis-trib.rb create 192.168.56.91:6379 192.168.56.92:6379 192.168.56.93:6379
>>> Creating cluster
[ERR] Node 192.168.56.91:6379 is not empty. Either the node already knows other nodes (check with CLUSTER NODES) or contains some key in database 0.
It complains that one of the nodes is not empty. Running flushdb did not help, because these nodes had been used in a previous cluster: the dump file, the AOF file, and the cluster runtime config file all have to be deleted.
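A sketch of the cleanup on each node, assuming the file names defined in the configuration above (shut the instance down, remove the old data and cluster state files, then start it again):
/opt/redis-4.0.6/src/redis-cli -p 6379 shutdown nosave
rm -f /opt/redis-4.0.6/rdb/dump-6379.rdb /opt/redis-4.0.6/rdb/redis-6379.aof /opt/redis-4.0.6/rdb/run_6379.conf
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/6379.conf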
After the cleanup, running the command again succeeds:
[root@pxc01 src]# ./redis-trib.rb create 192.168.56.91:6379 192.168.56.92:6379 192.168.56.93:6379
>>> Creating cluster
>>> Performing hash slots allocation on 3 nodes...
Using 3 masters:
192.168.56.91:6379
192.168.56.92:6379
192.168.56.93:6379
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:0-5460 (5461 slots) master
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots:5461-10922 (5462 slots) master
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots:10923-16383 (5461 slots) master
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join..
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:0-5460 (5461 slots) master
0 additional replica(s)
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots:5461-10922 (5462 slots) master
0 additional replica(s)
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots:10923-16383 (5461 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
To load the single instance's data into the cluster, all hash slots first have to be migrated onto one node; once the data has been loaded on that node, the slots are redistributed across all nodes again.
Check the current slot allocation of the cluster:
[root@pxc01 src]# ./redis-trib.rb check 192.168.56.91:6379
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:0-5460 (5461 slots) master
0 additional replica(s)
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots:5461-10922 (5462 slots) master
0 additional replica(s)
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots:10923-16383 (5461 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
The slot allocation per node is:
192.168.56.91:6379 holds slots 0-5460
192.168.56.92:6379 holds slots 5461-10922
192.168.56.93:6379 holds slots 10923-16383
We now want to migrate the slots of 192.168.56.92 and 192.168.56.93 over to 192.168.56.91.
First migrate the slots of 192.168.56.92 to 192.168.56.91:
[root@pxc01 src]# ./redis-trib.rb reshard 192.168.56.92:6379
>>> Performing Cluster Check (using node 192.168.56.92:6379)
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots:5461-10922 (5462 slots) master
0 additional replica(s)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:0-5460 (5461 slots) master
0 additional replica(s)
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots:10923-16383 (5461 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 5462 ## number of slots to move
What is the receiving node ID? c5f8550043708e27e659a0a5ecdf44264d1b3e41 ## ID of the node that receives the slots, here the ID of node 192.168.56.91
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b ## ID of the source node, here the ID of node 192.168.56.92
Source node #2:done
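For reference, the same migration could also be done non-interactively, using the option form that appears later in this post (a sketch; the IDs are the ones shown in the check output above):
./redis-trib.rb reshard --from 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b --to c5f8550043708e27e659a0a5ecdf44264d1b3e41 --slots 5462 --yes 192.168.56.91:6379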
You can see that node 92's slots have been migrated to node 91:
[root@pxc01 src]# ./redis-trib.rb check 192.168.56.91:6379
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:0-10922 (10923 slots) master
0 additional replica(s)
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots: (0 slots) master
0 additional replica(s)
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots:10923-16383 (5461 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Next, migrate node 192.168.56.93's slots to 192.168.56.91 as well:
[root@pxc01 src]# ./redis-trib.rb reshard 192.168.56.93:6379
>>> Performing Cluster Check (using node 192.168.56.93:6379)
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots:10923-16383 (5461 slots) master
0 additional replica(s)
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots: (0 slots) master
0 additional replica(s)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:0-10922 (10923 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 5461
What is the receiving node ID? c5f8550043708e27e659a0a5ecdf44264d1b3e41
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:00fb26359103a3dd7b55b4cfbe3e1984cc2f2387
Source node #2:done
Node 93's slots have also been migrated to node 91; all of the slots now sit on node 1:
[root@pxc01 src]# ./redis-trib.rb check 192.168.56.91:6379
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:0-16383 (16384 slots) master
0 additional replica(s)
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots: (0 slots) master
0 additional replica(s)
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots: (0 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
First stop the cluster, then copy the single instance's AOF file and dump file into the corresponding data directory on node 192.168.56.91. If the single instance had AOF enabled, the AOF file alone is enough; here I copied the dump file over as well.
[root@pxc01 rdb]# cp redis-8379.aof /opt/redis-4.0.6/rdb/
[root@pxc01 rdb]# cp dump-8379.rdb /opt/redis-4.0.6/rdb/
On node 192.168.56.91, rename the copied files so that they match the file names defined in that node's configuration file:
[root@pxc01 rdb]# mv redis-8379.aof redis-6379.aof
mv: overwrite ‘redis-6379.aof’? y
[root@pxc01 rdb]# mv dump-8379.rdb dump-6379.rdb
Start the cluster, log in, and check the data:
[root@pxc01 src]# ./redis-cli -h 192.168.56.91 -c -p 6379
192.168.56.91:6379> dbsize
(integer) 300000
192.168.56.91:6379>
The key count matches the single instance, so the data has been loaded. However, all of the data currently sits on node 1, so we need to reshard the slots to spread the data across the other nodes.
[root@pxc01 src]# ./redis-trib.rb check 192.168.56.91:6379
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:0-16383 (16384 slots) master
0 additional replica(s)
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots: (0 slots) master
0 additional replica(s)
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots: (0 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
All of the data slots are currently on node 1 and need to be distributed to the other nodes.
Move 5462 slots from node 1 (192.168.56.91) to node 192.168.56.92:
./redis-trib.rb reshard --from c5f8550043708e27e659a0a5ecdf44264d1b3e41 --to 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b --slots 5462 --yes 192.168.56.91:6379
Move 5461 slots from node 1 (192.168.56.91) to node 192.168.56.93:
./redis-trib.rb reshard --from c5f8550043708e27e659a0a5ecdf44264d1b3e41 --to 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 --slots 5461 --yes 192.168.56.91:6379
[root@pxc01 src]# ./redis-trib.rb check 192.168.56.91:6379
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:10923-16383 (5461 slots) master
0 additional replica(s)
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots:5462-10922 (5461 slots) master
0 additional replica(s)
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots:0-5461 (5462 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
You can see that part of the slots have been migrated to the other two nodes.
Now that the data is distributed across the masters, we create a slave for each master to form the cluster's master/slave relationships.
First start each slave instance (see the sketch below), then register a slave for each master.
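The slave instances are started the same way as before, with the 7379 configuration file; run this on each of the three nodes (assuming any leftover cluster state from the first setup has been cleaned up in the same way as for the masters):
/opt/redis-4.0.6/src/redis-server /opt/redis-4.0.6/conf/7379.conf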
Add slave 1 and make it the replica of master 1:
./redis-trib.rb add-node --slave --master-id c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:7379 192.168.56.91:6379
[root@pxc01 src]# ./redis-trib.rb check 192.168.56.91:6379
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: d11187b7bc773abf09917896d1b3b9964da27252 192.168.56.91:7379
slots: (0 slots) slave
replicates c5f8550043708e27e659a0a5ecdf44264d1b3e41
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots:5462-10922 (5461 slots) master
0 additional replica(s)
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots:0-5461 (5462 slots) master
0 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
Slave 1 has joined the cluster and is now the replica of master 1. Add the other two slaves to the cluster in the same way, each as the replica of its corresponding master; a sketch of those two commands follows.
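Sketch of the corresponding commands, using the master IDs shown in the check output above:
./redis-trib.rb add-node --slave --master-id 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:7379 192.168.56.91:6379
./redis-trib.rb add-node --slave --master-id 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:7379 192.168.56.91:6379
After all three slaves have joined, the cluster check shows: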
>>> Performing Cluster Check (using node 192.168.56.91:6379)
M: c5f8550043708e27e659a0a5ecdf44264d1b3e41 192.168.56.91:6379
slots:10923-16383 (5461 slots) master
1 additional replica(s)
S: 749ef9f210b3f841c436576f7c28d2f419414a54 192.168.56.92:7379
slots: (0 slots) slave
replicates 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b
S: d11187b7bc773abf09917896d1b3b9964da27252 192.168.56.91:7379
slots: (0 slots) slave
replicates c5f8550043708e27e659a0a5ecdf44264d1b3e41
M: 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387 192.168.56.93:6379
slots:5462-10922 (5461 slots) master
1 additional replica(s)
S: dbd8174b22a59d0c97cc7213c5c4998c47126f8e 192.168.56.93:7379
slots: (0 slots) slave
replicates 00fb26359103a3dd7b55b4cfbe3e1984cc2f2387
M: 65b17f89c5d6fbadc5185df4ce6896f47ac8cd2b 192.168.56.92:6379
slots:0-5461 (5462 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered
The dbsize migrated over from the single instance was 300000.
Now log in to each node and check how the data is distributed:
[root@pxc01 src]# ./redis-cli -h 192.168.56.91 -c -p 6379
192.168.56.91:6379> dbsize
(integer) 100001
192.168.56.91:6379> exit
[root@pxc01 src]# ./redis-cli -h 192.168.56.92 -c -p 6379
192.168.56.92:6379> dbsize
(integer) 100072
192.168.56.92:6379> exit
[root@pxc01 src]# ./redis-cli -h 192.168.56.93 -c -p 6379
192.168.56.93:6379> dbsize
(integer) 99927
100001 + 100072 + 99927 = 300000, so the data has been successfully distributed across the nodes.
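As a cross-check, redis-trib.rb also provides an info subcommand that reports the key count per master (a sketch):
./redis-trib.rb info 192.168.56.91:6379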
2. Problems encountered during installation
Running make test failed on one node with the following error:
[root@pxc02 redis-4.0.6]# make test
cd src && make test
make[1]: Entering directory `/opt/redis-4.0.6/src'
CC Makefile.dep
make[1]: Leaving directory `/opt/redis-4.0.6/src'
make[1]: Entering directory `/opt/redis-4.0.6/src'
You need tcl 8.5 or newer in order to run the Redis test
make[1]: *** [test] Error 1
make[1]: Leaving directory `/opt/redis-4.0.6/src'
make: *** [test] Error 2
Install the tcl packages:
[root@pxc02 Packages]# rpm -ivh tcl-8.5.13-8.el7.x86_64.rpm
[root@pxc02 Packages]# rpm -ivh tcl-pgtcl-2.0.0-5.el7.x86_64.rpm
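If the machine has a yum repository configured, the same dependency can also be installed with yum instead of the rpm commands above (an alternative, not what was done here):
yum -y install tcl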
Another error appeared while running make test:
"bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3]"
(file "tests/helpers/bg_complex_data.tcl" line 10)
Killing still running Redis server 21198
make: *** [test] Error 1
Solution: edit the replication test and increase the wait time:
[root@localhost src]# vim ../tests/integration/replication-2.tcl
start_server {tags {"repl"}} {
start_server {} {
test {First server should have role slave after SLAVEOF} {
r -1 slaveof [srv 0 host] [srv 0 port]
after 10000 # change this value to 10000
s -1 role
Running redis-trib.rb failed because ruby was not installed:
[root@pxc03 src]# /opt/redis-4.0.6/src/redis-trib.rb create --replicas 1 192.168.56.91:6379 192.168.56.91:7379 192.168.56.92:6379 192.168.56.92:7379 192.168.56.93:6379 192.168.56.93:7379
/usr/bin/env: ruby: No such file or directory
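One way to get ruby onto the system is to install it from the distribution repositories (an assumption; this step is not shown in the original, but the rpm -qa output further below shows ruby 2.0.0 ended up installed):
yum -y install ruby rubygems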
With ruby installed, redis-trib.rb still fails because the redis gem is missing:
[root@pxc01 yum.repos.d]# /opt/redis-4.0.6/src/redis-trib.rb create --replicas 1 192.168.56.91:6379 192.168.56.91:7379 192.168.56.92:6379 192.168.56.92:7379 192.168.56.93:6379 192.168.56.93:7379
/usr/share/rubygems/rubygems/core_ext/kernel_require.rb:55:in `require': cannot load such file -- redis (LoadError)
from /usr/share/rubygems/rubygems/core_ext/kernel_require.rb:55:in `require'
from /opt/redis-4.0.6/src/redis-trib.rb:25:in `
Download redis-4.0.0.gem and install it:
[root@pxc01 soft]# gem install -l redis-4.0.0.gem
ERROR: Error installing redis-4.0.0.gem:
redis requires Ruby version >= 2.2.2.
Check the ruby version currently installed on the system:
[root@pxc01 soft]# rpm -qa|grep ruby
rubygems-2.0.14.1-29.el7.noarch
rubygem-json-1.7.7-29.el7.x86_64
ruby-irb-2.0.0.648-29.el7.noarch
rubygem-io-console-0.4.2-29.el7.x86_64
ruby-libs-2.0.0.648-29.el7.x86_64
ruby-2.0.0.648-29.el7.x86_64
rubygem-rdoc-4.0.0-29.el7.noarch
rubygem-bigdecimal-1.2.0-29.el7.x86_64
rubygem-psych-2.0.0-29.el7.x86_64
The system ruby is 2.0.0, so download the 3.3.2 version of the redis gem and install that instead:
[root@pxc01 soft]# gem install -l redis-3.3.2.gem
Successfully installed redis-3.3.2
Parsing documentation for redis-3.3.2
Installing ri documentation for redis-3.3.2
1 gem installed