Redis:使用 redis-trib.rb 工具收缩集群节点并删除节点
一、完整地重新创建一个 Redis 集群
1. 停掉所有节点并清理旧数据,然后重新启动各节点
[root@db01 ~]# pkill redis #停掉所有节点的redis
[root@db01 ~]# rm -rf /data/redis_cluster/redis_6380/* #删除所有节点数据
[root@db01 ~]# rm -rf /data/redis_cluster/redis_6381/*
[root@db01 ~]# rm -rf /data/redis_cluster/redis_6390/*
[root@db01 ~]# rm -rf /data/redis_cluster/redis_6391/*
[root@db01 ~]# sh redis_shell.sh start 6380 #启动redis节点
root 5161 5124 1 00:38 pts/1 00:00:00 sh redis_shell.sh start 6380
root 5169 1 38 00:38 ? 00:00:00 redis-server 10.0.0.201:6380 [cluster]
root 5171 5161 34 00:38 pts/1 00:00:00 grep redis
[root@db01 ~]# sh redis_shell.sh start 6381
root 5169 1 3 00:38 ? 00:00:00 redis-server 10.0.0.201:6380 [cluster]
root 5174 5124 0 00:38 pts/1 00:00:00 sh redis_shell.sh start 6381
root 5182 1 21 00:38 ? 00:00:00 redis-server /opt/redis_cluster/redis_6381/conf/redis_6381.conf
root 5184 5174 11 00:38 pts/1 00:00:00 grep redis
[root@db01 ~]# sh redis_shell.sh start 6390
root 5169 1 2 00:38 ? 00:00:00 redis-server 10.0.0.201:6380 [cluster]
root 5182 1 5 00:38 ? 00:00:00 redis-server 10.0.0.201:6381 [cluster]
root 5187 5124 15 00:38 pts/1 00:00:00 sh redis_shell.sh start 6390
root 5195 1 35 00:38 ? 00:00:00 redis-server /opt/redis_cluster/redis_6390/conf/redis_6390.conf
root 5197 5187 7 00:38 pts/1 00:00:00 grep redis
[root@db01 ~]# sh redis_shell.sh start 6391
root 5169 1 2 00:38 ? 00:00:00 redis-server 10.0.0.201:6380 [cluster]
root 5182 1 2 00:38 ? 00:00:00 redis-server 10.0.0.201:6381 [cluster]
root 5195 1 11 00:38 ? 00:00:00 redis-server 10.0.0.201:6390 [cluster]
root 5200 5124 9 00:38 pts/1 00:00:00 sh redis_shell.sh start 6391
root 5208 1 65 00:38 ? 00:00:00 redis-server /opt/redis_cluster/redis_6391/conf/redis_6391.conf
root 5210 5200 7 00:38 pts/1 00:00:00 grep redis
[root@db01 ~]#
2. 利用工具创建集群:
[root@db01 ~]# cd /opt/redis_cluster/redis/src/
[root@db01 src]# ./redis-trib.rb create --replicas 1 10.0.0.201:6380 10.0.0.201:6390 10.0.0.202:6380 10.0.0.203:6380 10.0.0.201:6381 10.0.0.201:6391 10.0.0.202:6381 10.0.0.203:6381
>>> Creating cluster
[ERR] Sorry, can't connect to node 10.0.0.202:6380
[root@db01 src]# ./redis-trib.rb create --replicas 1 10.0.0.201:6380 10.0.0.201:6390 10.0.0.202:6380 10.0.0.203:6380 10.0.0.201:6381 10.0.0.201:6391 10.0.0.202:6381 10.0.0.203:6381
>>> Creating cluster
[ERR] Sorry, can't connect to node 10.0.0.202:6380
[root@db01 src]# ./redis-trib.rb create --replicas 1 10.0.0.201:6380 10.0.0.201:6390 10.0.0.202:6380 10.0.0.203:6380 10.0.0.201:6381 10.0.0.201:6391 10.0.0.202:6381 10.0.0.203:6381
>>> Creating cluster
[ERR] Sorry, can't connect to node 10.0.0.202:6380
[root@db01 src]# tail /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.201 db01
10.0.0.202 db02
10.0.0.203 db03
[root@db01 src]# ./redis-trib.rb create --replicas 1 10.0.0.201:6380 10.0.0.201:6390 10.0.0.202:6380 10.0.0.203:6380 10.0.0.201:6381 10.0.0.201:6391 10.0.0.202:6381 10.0.0.203:6381
>>> Creating cluster
>>> Performing hash slots allocation on 8 nodes...
Using 4 masters:
10.0.0.201:6380
10.0.0.202:6380
10.0.0.203:6380
10.0.0.201:6390
Adding replica 10.0.0.202:6381 to 10.0.0.201:6380
Adding replica 10.0.0.203:6381 to 10.0.0.202:6380
Adding replica 10.0.0.201:6381 to 10.0.0.203:6380
Adding replica 10.0.0.201:6391 to 10.0.0.201:6390
M: e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380
slots:0-4095 (4096 slots) master
M: 0e319454b080215c39045906df8758db03392f3b 10.0.0.201:6390
slots:12288-16383 (4096 slots) master
M: 26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380
slots:4096-8191 (4096 slots) master
M: 30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380
slots:8192-12287 (4096 slots) master
S: bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381
replicates 30c50f835e3153d09b54dc0a1061a707384173a0
S: 6638237e42e568530abfe26b714250b034d90046 10.0.0.201:6391
replicates 0e319454b080215c39045906df8758db03392f3b
S: a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381
replicates e0700c10140e25e7660fe9fae6a60ef5dc23bcb3
S: c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381
replicates 26e0f427f67d91b4e431d0ab656723fd4f72741d
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join......
>>> Performing Cluster Check (using node 10.0.0.201:6380)
M: e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380
slots:0-4095 (4096 slots) master
1 additional replica(s)
M: 0e319454b080215c39045906df8758db03392f3b 10.0.0.201:6390
slots:12288-16383 (4096 slots) master
1 additional replica(s)
S: bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381
slots: (0 slots) slave
replicates 30c50f835e3153d09b54dc0a1061a707384173a0
S: c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381
slots: (0 slots) slave
replicates 26e0f427f67d91b4e431d0ab656723fd4f72741d
S: 6638237e42e568530abfe26b714250b034d90046 10.0.0.201:6391
slots: (0 slots) slave
replicates 0e319454b080215c39045906df8758db03392f3b
M: 26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380
slots:4096-8191 (4096 slots) master
1 additional replica(s)
S: a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381
slots: (0 slots) slave
replicates e0700c10140e25e7660fe9fae6a60ef5dc23bcb3
M: 30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380
slots:8192-12287 (4096 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
[root@db01 src]#
[root@db01 ~]# sh redis_shell.sh login 6380
10.0.0.201:6380> cluster nodes
0e319454b080215c39045906df8758db03392f3b 10.0.0.201:6390 master - 0 1618506402907 2 connected 12288-16383
bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381 slave 30c50f835e3153d09b54dc0a1061a707384173a0 0 1618506400893 5 connected
c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381 slave 26e0f427f67d91b4e431d0ab656723fd4f72741d 0 1618506403919 8 connected
e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380 myself,master - 0 0 1 connected 0-4095
6638237e42e568530abfe26b714250b034d90046 10.0.0.201:6391 slave 0e319454b080215c39045906df8758db03392f3b 0 1618506404925 6 connected
26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380 master - 0 1618506401902 3 connected 4096-8191
a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381 slave e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 0 1618506405934 7 connected
30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380 master - 0 1618506399889 4 connected 8192-12287
10.0.0.201:6380>
二、收缩集群:
[root@db01 ~]# cd /opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb reshard 10.0.0.201:6380 #任意集群节点都可以执行收缩操作
>>> Performing Cluster Check (using node 10.0.0.201:6380)
M: e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380
slots:0-4095 (4096 slots) master
1 additional replica(s)
M: 0e319454b080215c39045906df8758db03392f3b 10.0.0.201:6390
slots:12288-16383 (4096 slots) master
1 additional replica(s)
S: bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381
slots: (0 slots) slave
replicates 30c50f835e3153d09b54dc0a1061a707384173a0
S: c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381
slots: (0 slots) slave
replicates 26e0f427f67d91b4e431d0ab656723fd4f72741d
S: 6638237e42e568530abfe26b714250b034d90046 10.0.0.201:6391
slots: (0 slots) slave
replicates 0e319454b080215c39045906df8758db03392f3b
M: 26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380
slots:4096-8191 (4096 slots) master
1 additional replica(s)
S: a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381
slots: (0 slots) slave
replicates e0700c10140e25e7660fe9fae6a60ef5dc23bcb3
M: 30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380
slots:8192-12287 (4096 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 1365 #要迁出多少个槽位,4096/3≈1365
What is the receiving node ID? e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 #把槽位给谁
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:0e319454b080215c39045906df8758db03392f3b #从哪里迁出槽位(被收缩节点的 ID)
Source node #2:done
[root@db01 src]# sh /root/redis_shell.sh login 6380
10.0.0.201:6380> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:8
cluster_size:4
cluster_current_epoch:9
cluster_my_epoch:9
cluster_stats_messages_sent:4808
cluster_stats_messages_received:4792
10.0.0.201:6380> cluster nodes
0e319454b080215c39045906df8758db03392f3b 10.0.0.201:6390 master - 0 1618508072685 2 connected 13653-16383 #发现槽位少了
bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381 slave 30c50f835e3153d09b54dc0a1061a707384173a0 0 1618508075205 5 connected
c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381 slave 26e0f427f67d91b4e431d0ab656723fd4f72741d 0 1618508076719 8 connected
e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380 myself,master - 0 0 9 connected 0-4095 12288-13652 #发现槽位多了
6638237e42e568530abfe26b714250b034d90046 10.0.0.201:6391 slave 0e319454b080215c39045906df8758db03392f3b 0 1618508075708 6 connected
26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380 master - 0 1618508074702 3 connected 4096-8191
a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381 slave e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 0 1618508076214 9 connected
30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380 master - 0 1618508077724 4 connected 8192-12287
10.0.0.201:6380>
[root@db01 src]# pwd
/opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb reshard 10.0.0.201:6380
>>> Performing Cluster Check (using node 10.0.0.201:6380)
M: e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380
slots:0-4095,12288-13652 (5461 slots) master
1 additional replica(s)
M: 0e319454b080215c39045906df8758db03392f3b 10.0.0.201:6390
slots:13653-16383 (2731 slots) master
1 additional replica(s)
S: bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381
slots: (0 slots) slave
replicates 30c50f835e3153d09b54dc0a1061a707384173a0
S: c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381
slots: (0 slots) slave
replicates 26e0f427f67d91b4e431d0ab656723fd4f72741d
S: 6638237e42e568530abfe26b714250b034d90046 10.0.0.201:6391
slots: (0 slots) slave
replicates 0e319454b080215c39045906df8758db03392f3b
M: 26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380
slots:4096-8191 (4096 slots) master
1 additional replica(s)
S: a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381
slots: (0 slots) slave
replicates e0700c10140e25e7660fe9fae6a60ef5dc23bcb3
M: 30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380
slots:8192-12287 (4096 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 1365
What is the receiving node ID? 26e0f427f67d91b4e431d0ab656723fd4f72741d
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:0e319454b080215c39045906df8758db03392f3b
Source node #2:done
[root@db01 src]# pwd
/opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb reshard 10.0.0.201:6380
>>> Performing Cluster Check (using node 10.0.0.201:6380)
M: e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380
slots:0-4095,12288-13652 (5461 slots) master
1 additional replica(s)
M: 0e319454b080215c39045906df8758db03392f3b 10.0.0.201:6390
slots:15018-16383 (1366 slots) master
1 additional replica(s)
S: bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381
slots: (0 slots) slave
replicates 30c50f835e3153d09b54dc0a1061a707384173a0
S: c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381
slots: (0 slots) slave
replicates 26e0f427f67d91b4e431d0ab656723fd4f72741d
S: 6638237e42e568530abfe26b714250b034d90046 10.0.0.201:6391
slots: (0 slots) slave
replicates 0e319454b080215c39045906df8758db03392f3b
M: 26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380
slots:4096-8191,13653-15017 (5461 slots) master
1 additional replica(s)
S: a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381
slots: (0 slots) slave
replicates e0700c10140e25e7660fe9fae6a60ef5dc23bcb3
M: 30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380
slots:8192-12287 (4096 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 1366
What is the receiving node ID? 30c50f835e3153d09b54dc0a1061a707384173a0
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1:0e319454b080215c39045906df8758db03392f3b
Source node #2:done
[root@db01 src]# pwd
/opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb reshard 10.0.0.201:6380
>>> Performing Cluster Check (using node 10.0.0.201:6380)
M: e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380
slots:0-4095,12288-13652 (5461 slots) master
1 additional replica(s)
M: 0e319454b080215c39045906df8758db03392f3b 10.0.0.201:6390
slots: (0 slots) master
0 additional replica(s)
S: bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381
slots: (0 slots) slave
replicates 30c50f835e3153d09b54dc0a1061a707384173a0
S: c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381
slots: (0 slots) slave
replicates 26e0f427f67d91b4e431d0ab656723fd4f72741d
S: 6638237e42e568530abfe26b714250b034d90046 10.0.0.201:6391
slots: (0 slots) slave
replicates 30c50f835e3153d09b54dc0a1061a707384173a0
M: 26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380
slots:4096-8191,13653-15017 (5461 slots) master
1 additional replica(s)
S: a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381
slots: (0 slots) slave
replicates e0700c10140e25e7660fe9fae6a60ef5dc23bcb3
M: 30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380
slots:8192-12287,15018-16383 (5462 slots) master
2 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)?
#检查操作
[root@db01 src]# pwd
/opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb reshard 10.0.0.201:6380
>>> Performing Cluster Check (using node 10.0.0.201:6380)
M: e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380
slots:0-4095,12288-13652 (5461 slots) master
1 additional replica(s)
M: 0e319454b080215c39045906df8758db03392f3b 10.0.0.201:6390
slots: (0 slots) master
0 additional replica(s)
S: bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381
slots: (0 slots) slave
replicates 30c50f835e3153d09b54dc0a1061a707384173a0
S: c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381
slots: (0 slots) slave
replicates 26e0f427f67d91b4e431d0ab656723fd4f72741d
S: 6638237e42e568530abfe26b714250b034d90046 10.0.0.201:6391
slots: (0 slots) slave
replicates 30c50f835e3153d09b54dc0a1061a707384173a0
M: 26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380
slots:4096-8191,13653-15017 (5461 slots) master
1 additional replica(s)
S: a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381
slots: (0 slots) slave
replicates e0700c10140e25e7660fe9fae6a60ef5dc23bcb3
M: 30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380
slots:8192-12287,15018-16383 (5462 slots) master
2 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)?
三、使用工具删除无用的节点
[root@db01 ~]# cd /opt/redis_cluster/redis/src
[root@db01 src]# ./redis-trib.rb del-node 10.0.0.201:6390 0e319454b080215c39045906df8758db03392f3b
>>> Removing node 0e319454b080215c39045906df8758db03392f3b from cluster 10.0.0.201:6390
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
[root@db01 src]# ./redis-trib.rb del-node 10.0.0.201:6391 6638237e42e568530abfe26b714250b034d90046
>>> Removing node 6638237e42e568530abfe26b714250b034d90046 from cluster 10.0.0.201:6391
>>> Sending CLUSTER FORGET messages to the cluster...
>>> SHUTDOWN the node.
[root@db01 src]# ps -ef|grep redis
root 5169 1 0 00:38 ? 00:00:36 redis-server 10.0.0.201:6380 [cluster]
root 5182 1 0 00:38 ? 00:00:23 redis-server 10.0.0.201:6381 [cluster]
root 5487 5456 0 02:25 pts/2 00:00:00 grep --color=auto redis
[root@db01 src]#
#检查删除的节点是否不存在了
[root@db01 src]# sh /root/redis_shell.sh login 6380
10.0.0.201:6380> cluster nodes
bcdf9b35de9d5766eaf25a1227715f79ada40fd6 10.0.0.201:6381 slave 30c50f835e3153d09b54dc0a1061a707384173a0 0 1618511222429 11 connected
c5356cc11b854eff71ca701ec33ec6c9680b8e70 10.0.0.203:6381 slave 26e0f427f67d91b4e431d0ab656723fd4f72741d 0 1618511223537 10 connected
e0700c10140e25e7660fe9fae6a60ef5dc23bcb3 10.0.0.201:6380 myself,slave a1e21cae39584a1fd1136b1762ae360dad5b3cf5 0 0 9 connected
26e0f427f67d91b4e431d0ab656723fd4f72741d 10.0.0.202:6380 master - 0 1618511222531 10 connected 4096-8191 13653-15017
a1e21cae39584a1fd1136b1762ae360dad5b3cf5 10.0.0.202:6381 master - 0 1618511224544 12 connected 0-4095 12288-13652
30c50f835e3153d09b54dc0a1061a707384173a0 10.0.0.203:6380 master - 0 1618511221526 11 connected 8192-12287 15018-16383
10.0.0.201:6380>