第八次作业-20230903

一、熟练安装Redis

1.1 centos7 和 ubuntu18 通过 yum/apt 安装最新版的 Redis

# centos7 安装最新版 Redis
[root@centos7-mini5 ~]# cat > /etc/yum.repos.d/redis.repo <<EOF
> [Redis]
> name=redis
> baseurl=http://packages.redis.io/rpm/rhel7
> enabled=1
> gpgcheck=1
> EOF
[root@centos7-mini5 ~]# curl -fsSL https://packages.redis.io/gpg > /tmp/redis.key
[root@centos7-mini5 ~]# sudo rpm --import /tmp/redis.key
## Redis stack是Redis的扩展,它添加了现代数据模型和处理引擎,以提供完整的开发人员体验
[root@centos7-mini5 ~]# yum list redis-stack-server --showduplicates | sort -r
[root@centos7-mini5 ~]# yum -y install redis-stack-server
[root@centos7-mini5 ~]# systemctl start redis-stack-server.service
[root@centos7-mini5 ~]# redis-cli --version
redis-cli 7.2.0

# ubuntu18 中安装最新版 Redis
root@ubuntu18-server11:~# curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg
root@ubuntu18-server11:~# sudo chmod 644 /usr/share/keyrings/redis-archive-keyring.gpg
root@ubuntu18-server11:~# echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list
root@ubuntu18-server11:~# apt update
root@ubuntu18-server11:~# apt-cache madison redis-server
root@ubuntu18-server11:~# apt -y install redis-server
root@ubuntu18-server11:~# redis-cli --version
redis-cli 7.2.0

1.2 在 Ubuntu18 中编译安装Redis

# Redis 各版本源码包下载地址
https://download.redis.io/releases/

# 下载源码包
root@ubuntu18-server11:~# apt update
root@ubuntu18-server11:~# wget https://download.redis.io/releases/redis-7.2.0.tar.gz

# 安装常用软件和环境依赖包
root@ubuntu18-server11:~# apt -y install iproute2 libssl-dev gcc openssh-server lrzsz openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev unzip zip make libsystemd-dev pkg-config

# 开始源码编译
root@ubuntu18-server11:~# tar xvf redis-7.2.0.tar.gz -C /usr/local/src/
root@ubuntu18-server11:~# cd /usr/local/src/redis-7.2.0/
root@ubuntu18-server11:/usr/local/src/redis-7.2.0# make USE_SYSTEMD=yes PREFIX=/apps/redis install

# 查看编译完成后相关命令
root@ubuntu18-server11:~# ll /apps/redis/bin/
total 31540
drwxr-xr-x 2 root root     4096 Sep  4 14:20 ./
drwxr-xr-x 3 root root     4096 Sep  4 14:20 ../
## Redis 性能测试工具
-rwxr-xr-x 1 root root  7850216 Sep  4 14:20 redis-benchmark*
## AOF 文件检查工具
lrwxrwxrwx 1 root root       12 Sep  4 14:20 redis-check-aof -> redis-server*
## RDB 文件检查工具	
lrwxrwxrwx 1 root root       12 Sep  4 14:20 redis-check-rdb -> redis-server*
## 客户端工具
-rwxr-xr-x 1 root root  8209680 Sep  4 14:20 redis-cli*
## 哨兵
lrwxrwxrwx 1 root root       12 Sep  4 14:20 redis-sentinel -> redis-server*
## Redis 服务启动命令
-rwxr-xr-x 1 root root 16222784 Sep  4 14:20 redis-server*
root@ubuntu18-server11:~# ln -sv /apps/redis/bin/redis-* /usr/bin/

# 创建Redis服务相关目录和文件
root@ubuntu18-server11:~# mkdir /apps/redis/{etc,logs,data,run}
## 拷贝模板配置文件
root@ubuntu18-server11:~# cp /usr/local/src/redis-7.2.0/redis.conf /apps/redis/etc/
## 拷贝Redis服务自启动文件并修改相关配置
root@ubuntu18-server11:~# cp /usr/local/src/redis-7.2.0/utils/systemd-redis_server.service /lib/systemd/system/redis-server.service
root@ubuntu18-server11:~# cat /lib/systemd/system/redis-server.service
[Unit]
Description=Redis data structure server
Documentation=https://redis.io/documentation
#Before=your_application.service another_example_application.service
#AssertPathExists=/var/lib/redis
Wants=network-online.target
After=network-online.target

[Service]
#ExecStart=/usr/local/bin/redis-server --supervised systemd --daemonize no
## Alternatively, have redis-server load a configuration file:
#ExecStart=/usr/local/bin/redis-server /path/to/your/redis.conf
ExecStart=/apps/redis/bin/redis-server /apps/redis/etc/redis.conf --supervised systemd
ExecReload=/bin/kill -s HUP $MAINPID 
ExecStop=/bin/kill -s QUIT $MAINPID
LimitNOFILE=10032
NoNewPrivileges=yes
#OOMScoreAdjust=-900
#PrivateTmp=yes
Type=notify
TimeoutStartSec=infinity
TimeoutStopSec=infinity
UMask=0077
User=redis
Group=redis
WorkingDirectory=/apps/redis

[Install]
WantedBy=multi-user.target

# 创建启动用户并修改配置文件
root@ubuntu18-server11:~# groupadd -g 2099 redis && useradd -u 2099 -g 2099 redis -s /sbin/nologin
root@ubuntu18-server11:~# chown -R redis:redis /apps/redis/
root@ubuntu18-server11:~# vim /apps/redis/etc/redis.conf
bind 0.0.0.0
requirepass 123456
pidfile /apps/redis/run/redis_6379.pid
logfile "/apps/redis/logs/redis_6379.log"
dir /apps/redis/data/

# 启动Redis服务
root@ubuntu18-server11:~# systemctl daemon-reload && systemctl enable --now redis-server.service
root@ubuntu18-server11:~# ss -ntl | grep 6379
LISTEN   0         128                 0.0.0.0:6379             0.0.0.0:*
root@ubuntu18-server11:~# redis-cli -a 123456
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
127.0.0.1:6379> info
# Server
redis_version:7.2.0
redis_git_sha1:00000000
redis_git_dirty:0
redis_build_id:ff386f491efc6121
redis_mode:standalone
os:Linux 4.15.0-213-generic x86_64

Redis 可能出现的告警信息处理

1、tcp-backlog: backlog参数控制的是三次握手时server端收到client端ack确认后的队列长度
# 内核中默认值
root@ubuntu18-server11:~# sysctl -a | grep somaxconn
sysctl: net.core.somaxconn = 128
# redis 配置文件默认值
root@ubuntu18-server11:~# cat /apps/redis/etc/redis.conf | grep tcp-backlog
tcp-backlog 511
# 需要将内核中该值设置得比Redis配置文件中的值要大
root@ubuntu18-server11:~# echo "net.core.somaxconn = 512" >> /etc/sysctl.conf
root@ubuntu18-server11:~# sysctl -p


2、 vm.overcommit_memory: 内存分配
当内核检查是否有足够的可用内存供应用进程使用时,
如果为 0,当有足够的可用内存时,允许申请内存;否则内存申请失败,并把错误返回给应用进程
如果为 1,表示内核允许分配所有的物理内存,不管当前的内存状态如何
如果为 2,表示内核禁止过量分配,整个系统已提交的内存不允许超过 交换空间 + overcommit_ratio × 物理内存 的总和,超出则申请失败
# 设置该值为1
root@ubuntu18-server11:~# echo "vm.overcommit_memory = 1" >> /etc/sysctl.conf
root@ubuntu18-server11:~# sysctl -p


3、transparent hugepage(THP): 关闭大页内存动态分配,关闭后将由Redis进行大页内存分配,而非内核的THP进行分配
always   一直使用
never    从不使用
madvise  由应用程序决定是否使用
# 查看当前使用,Redis程序默认是不使用的
root@ubuntu18-server11:~# cat /sys/kernel/mm/transparent_hugepage/enabled 
always [madvise] never
# 设置为 never
root@ubuntu18-server11:~# echo never > /sys/kernel/mm/transparent_hugepage/enabled

告警处理完成后,重启Redis服务

systemctl restart redis-server.service

1.3 Redis 性能压测

redis-benchmark 是 Redis 自带的压测工具,会对 Redis Server 进行常规的数据读写测试

inline 和 multibulk 是 Redis 定义的数据解析协议

  • inline 是旧版本的数据访问协议,现在主要用在像 redis-cli 这种简单的调试和处理中
  • multibulk 是新版本协议,目前被程序客户端普遍使用,它支持让空格和回车在 redis 中正确的表达出来
root@ubuntu18-server11:~# redis-benchmark --help
参数选项 参数说明
-h 指定 Redis 地址
-p 指定 Redis 端口
-s 使用 Socket 文件连接
-c 指定并发连接数
-n 指定请求总数
-d 以字节的形式指定 SET/GET 值的数据大小
-k 是否开启会话保持,1 表示 keepalive(保持连接);0 表示 reconnect(每次重连),默认为1
-r SET/GET/INCR 使用随机 key,SADD 使用随机值,ZADD 使用随机值和评分
-P 是否使用 Pipeline 请求
-q 静默模式,仅显示测试结果的 query/sec 值
--csv 将测试结果转换为 CSV 格式输出
-l(小写L) 生成循环,永久执行测试
-t 运行以逗号分隔的单次多测试命令列表
-I(大写 i) 空闲模式,打开 N 个空闲连接并等待连接
# 默认压测,仅显示压测结果的 query/sec 值
root@ubuntu18-server11:~# redis-benchmark -h 192.168.119.171 -p 6379 -n 1000 -c 200 -a 123456 -q
PING_INLINE: 166666.67 requests per second, p50=0.503 msec        
PING_MBULK: 166666.67 requests per second, p50=0.503 msec
SET: 166666.67 requests per second, p50=0.527 msec        
GET: 166666.67 requests per second, p50=0.519 msec
INCR: 166666.67 requests per second, p50=0.511 msec        
LPUSH: 62500.00 requests per second, p50=2.567 msec
RPUSH: 166666.67 requests per second, p50=0.495 msec        
LPOP: 200000.00 requests per second, p50=0.663 msec
RPOP: 166666.67 requests per second, p50=0.527 msec        
SADD: 200000.00 requests per second, p50=0.535 msec
HSET: 166666.67 requests per second, p50=0.519 msec        
SPOP: 166666.67 requests per second, p50=0.535 msec
ZADD: 166666.67 requests per second, p50=0.559 msec        
ZPOPMIN: 166666.67 requests per second, p50=0.503 msec
LPUSH (needed to benchmark LRANGE): 166666.67 requests per second, p50=0.695 msec        
LRANGE_100 (first 100 elements): 58823.53 requests per second, p50=1.671 msec
LRANGE_300 (first 300 elements): 23809.52 requests per second, p50=4.287 msec         
LRANGE_500 (first 500 elements): 14492.75 requests per second, p50=7.951 msec                  
LRANGE_600 (first 600 elements): 12658.23 requests per second, p50=9.175 msec                  
MSET (10 keys): 124999.99 requests per second, p50=1.127 msec
XADD: 166666.67 requests per second, p50=0.719 msec

压测单个命令,累计1000个请求并发100

image

# 压测多个命令,并以CSV格式输出
root@ubuntu18-server11:~# redis-benchmark -h 192.168.119.171 -p 6379 -t ping,set,get -n 1000 -c 100 -a 123456 --csv
"test","rps","avg_latency_ms","min_latency_ms","p50_latency_ms","p95_latency_ms","p99_latency_ms","max_latency_ms"
"PING_INLINE","83333.34","1.069","0.368","1.127","1.551","1.911","2.071"
"PING_MBULK","200000.00","0.269","0.080","0.255","0.439","0.503","0.543"
"SET","200000.00","0.270","0.080","0.247","0.479","0.607","0.695"
"GET","200000.00","0.265","0.072","0.255","0.439","0.543","0.583"
# 指定每个key的大小为1024Byte
root@ubuntu18-server11:~# redis-benchmark -h 192.168.119.171 -p 6379 -t set -d 1024 -n 1000 -r 1000 -c 200 -a 123456 
====== SET ======                                         
  1000 requests completed in 0.02 seconds
  200 parallel clients
  1024 bytes payload
  keep alive: 1
  host configuration "save": 3600 1 300 100 60 10000
  host configuration "appendonly": no
  multi-thread: no

Latency by percentile distribution:
0.000% <= 0.423 milliseconds (cumulative count 1)
50.000% <= 1.775 milliseconds (cumulative count 504)
75.000% <= 2.375 milliseconds (cumulative count 750)
87.500% <= 4.023 milliseconds (cumulative count 875)
93.750% <= 4.935 milliseconds (cumulative count 938)
96.875% <= 5.175 milliseconds (cumulative count 969)
98.438% <= 5.303 milliseconds (cumulative count 985)
99.219% <= 5.383 milliseconds (cumulative count 993)
99.609% <= 5.431 milliseconds (cumulative count 997)
99.805% <= 5.455 milliseconds (cumulative count 999)
99.902% <= 5.487 milliseconds (cumulative count 1000)
100.000% <= 5.487 milliseconds (cumulative count 1000)

Cumulative distribution of latencies:
0.000% <= 0.103 milliseconds (cumulative count 0)
0.600% <= 0.503 milliseconds (cumulative count 6)
1.300% <= 0.607 milliseconds (cumulative count 13)
2.000% <= 0.703 milliseconds (cumulative count 20)
3.100% <= 0.807 milliseconds (cumulative count 31)
4.800% <= 0.903 milliseconds (cumulative count 48)
7.000% <= 1.007 milliseconds (cumulative count 70)
12.900% <= 1.103 milliseconds (cumulative count 129)
19.800% <= 1.207 milliseconds (cumulative count 198)
25.300% <= 1.303 milliseconds (cumulative count 253)
30.400% <= 1.407 milliseconds (cumulative count 304)
35.600% <= 1.503 milliseconds (cumulative count 356)
41.300% <= 1.607 milliseconds (cumulative count 413)
46.200% <= 1.703 milliseconds (cumulative count 462)
52.900% <= 1.807 milliseconds (cumulative count 529)
59.700% <= 1.903 milliseconds (cumulative count 597)
63.500% <= 2.007 milliseconds (cumulative count 635)
66.700% <= 2.103 milliseconds (cumulative count 667)
82.500% <= 3.103 milliseconds (cumulative count 825)
88.600% <= 4.103 milliseconds (cumulative count 886)
96.000% <= 5.103 milliseconds (cumulative count 960)
100.000% <= 6.103 milliseconds (cumulative count 1000)

Summary:
  throughput summary: 58823.53 requests per second
  latency summary (msec):
          avg       min       p50       p95       p99       max
        2.126     0.416     1.775     5.031     5.351     5.487

# 测试向指定列表 mylist 写入总数(-n 10000)个随机数据,每个数据随机在 -r 100 以内
root@ubuntu18-server11:~# redis-benchmark -h 192.168.119.171 -r 100 -n 10000 -a 123456 lpush mylist __rand_int__
root@ubuntu18-server11:~# redis-cli 
127.0.0.1:6379> auth 123456
OK
127.0.0.1:6379> lindex mylist 0
"000000000053"
127.0.0.1:6379> lindex mylist 1
"000000000046"
127.0.0.1:6379> lindex mylist 2
"000000000029"
127.0.0.1:6379> lindex mylist 3
"000000000043"

1.4 Redis key 的淘汰策略

noeviction: 不删除策略,达到最大内存限制时,如果需要更多内存,直接返回错误信息
allkeys-random: 随机删除 key
allkeys-lru: 对所有key使用LRU算法移除
allkeys-lfu: 对所有key使用LFU算法移除

# 针对设置了过期时间的key
volatile-lru: 淘汰最长时间没有被使用的
volatile-random: 随机删除设置了过期时间的key
volatile-lfu: 淘汰最近一段时间内使用次数最少的

在配置文件中可修改默认淘汰策略
image

二、掌握Redis的常见数据类型的使用

2.1 字符串 string

Redis 中所有 key 的类型都是字符串,常用于保存 Session 信息

2.1.1 添加和获取key

set 指令可以创建一个 key 并赋值
get 指令可以获取一个 key 的值

127.0.0.1:6379> set key1 value1
OK
127.0.0.1:6379> get key1
"value1"
127.0.0.1:6379> type key1
string

127.0.0.1:6379> set key1 value11
OK
127.0.0.1:6379> get key1
"value11"

# key 名称大小写敏感(此前已另行执行 set KEY1 value1,因此 KEY1 与 key1 是两个不同的 key)
127.0.0.1:6379> get KEY1
"value1"

# 批量设置多个key
127.0.0.1:6379> MSET name snoopy sex male
OK
127.0.0.1:6379> MGET name sex
1) "snoopy"
2) "male"

# 批量获取多个key
127.0.0.1:6379> KEYS *
1) "name"
2) "KEY1"
3) "sex"
4) "key1"

2.1.2 删除 key

127.0.0.1:6379> del key1 KEY1
(integer) 2
127.0.0.1:6379> KEYS *
1) "name"
2) "sex"

2.1.3 追加数据

127.0.0.1:6379> append name "append a word"
(integer) 19
127.0.0.1:6379> get name
"snoopyappend a word"

2.1.4 设置新值并返回旧值

127.0.0.1:6379> get name
"Tom"
127.0.0.1:6379> getset name Jerry
"Tom"
127.0.0.1:6379> get name
"Jerry"

2.1.5 返回 key 所对应的 value 的字节数

127.0.0.1:6379> STRLEN name
(integer) 5
127.0.0.1:6379> STRLEN sex
(integer) 4
127.0.0.1:6379> get name
"Jerry"
127.0.0.1:6379> get sex
"male"

2.1.6 判断 key 是否存在

127.0.0.1:6379> EXISTS name
(integer) 1
127.0.0.1:6379> EXISTS NAME
(integer) 0
127.0.0.1:6379> EXISTS name sex
(integer) 2

2.1.7 查看、设置、取消 key 的过期时间

ttl key # 查看key的剩余生存时间,如果key过期后,会自动删除
-1 # 返回值表示永不过期,默认创建的key是永不过期,重新对key赋值,也会从有剩余生命周期变成永不过期
-2 # 返回值表示没有此key
num # key的剩余有效期

127.0.0.1:6379> TTL name
(integer) -1
127.0.0.1:6379> set Address anhui  EX 100
OK
127.0.0.1:6379> ttl Address
(integer) 92
# 重新设置 key 的过期时间
127.0.0.1:6379> ttl Address
(integer) 35
127.0.0.1:6379> EXPIRE Address 100
(integer) 1
127.0.0.1:6379> ttl Address
(integer) 97
# 取消 key 的过期时间
127.0.0.1:6379> PERSIST Address
(integer) 1
127.0.0.1:6379> ttl Address
(integer) -1

2.1.8 数值增加或降低

利用 INCR 命令簇(INCR,DECR,INCRBY,DECRBY)把字符串当作原子计数器使用

# 数值递增和递减
127.0.0.1:6379> INCR num
(integer) 11
127.0.0.1:6379> get num
"11"
127.0.0.1:6379> DECR num
(integer) 10
127.0.0.1:6379> get num
"10"

# 数值按指定位数增加或减少
127.0.0.1:6379> INCRBY num 2
(integer) 12
127.0.0.1:6379> get num
"12"
127.0.0.1:6379> DECRBY num 5
(integer) 7
127.0.0.1:6379> get num
"7"

2.2 列表 list

列表是一个双向可读写的管道,头部是左侧,尾部是右侧,类似于队列一样,一个列表最多可以包含 2^32 - 1 个元素。元素值可以重复,常用于存入日志等场景。

2.2.1 生成列表并插入数据

LPUSH 和 RPUSH 都可以插入列表

127.0.0.1:6379> LPUSH name zhao qian sun li
(integer) 4
127.0.0.1:6379> TYPE name
list

127.0.0.1:6379> RPUSH course chinese english french
(integer) 3 
127.0.0.1:6379> type course
list

# 从左边追加数据
127.0.0.1:6379> LPUSH name zhou
(integer) 5
# 从右边追加数据
127.0.0.1:6379> RPUSH course math
(integer) 4

2.2.2 获取列表的长度

127.0.0.1:6379> LLEN name
(integer) 5
127.0.0.1:6379> LLEN course
(integer) 4

2.2.3 获取列表中指定位置的数据

列表 name 使用 LPUSH 插入的值排列方式如下,正索引在上,负索引在下

image

列表 course 使用 RPUSH 插入的值排列方式如下,正索引在上,负索引在下
image

127.0.0.1:6379> LINDEX name 0
"zhou"
127.0.0.1:6379> LINDEX name 4
"zhao"
127.0.0.1:6379> LINDEX name -1
"zhao"
127.0.0.1:6379> LINDEX course 0
"chinese"

# 查看所有元素
127.0.0.1:6379> LRANGE  name 0 -1
1) "zhou"
2) "li"
3) "sun"
4) "qian"
5) "zhao"
127.0.0.1:6379> LRANGE  course 0 -1
1) "chinese"
2) "english"
3) "french"
4) "math"

2.2.4 修改列表指定索引值

127.0.0.1:6379> LRANGE  course 0 -1
1) "chinese"
2) "english"
3) "french"
4) "math"
127.0.0.1:6379> lset course 2 python
OK
127.0.0.1:6379> lrange course 0 -1
1) "chinese"
2) "english"
3) "python"
4) "math"

2.2.5 移除列表数据

LPOP 从左边弹出一个数据,RPOP 从右边弹出一个数据

127.0.0.1:6379> LPOP name
"zhou"
127.0.0.1:6379> LRANGE name 0 -1
1) "li"
2) "sun"
3) "qian"
4) "zhao"
127.0.0.1:6379> RPOP name 
"zhao"
127.0.0.1:6379> LRANGE name 0 -1
1) "li"
2) "sun"
3) "qian"

LTRIM 对一个列表进行修剪,让列表只保留指定区间内的元素,不在指定区间之内的元素都将被删除

127.0.0.1:6379> LRANGE course 0 -1
1) "chinese"
2) "english"
3) "python"
4) "math"

# 保留第2个和第3个元素(索引1到2)
127.0.0.1:6379> LTRIM course 1 2
OK
127.0.0.1:6379> LRANGE course 0 -1
1) "english"
2) "python"

2.3 集合 set

Set 是 String 类型的无序集合,集合中的成员是唯一的,集合中不能出现重复的数据,可以在两个不同的集合中对数据进行对比并取值,常用于取值判断、统计、交集等场景。

集合特点:无序、无重复、集合间操作

2.3.1 生成集合 key

127.0.0.1:6379> SADD set1 v1 v2
(integer) 2
127.0.0.1:6379> TYPE set1
set

2.3.2 追加数值

# 只能追加未存在的数据
127.0.0.1:6379> SADD set1 v3 v4
(integer) 2
# 追加存在的值,无法添加
127.0.0.1:6379> SADD set1 v3
(integer) 0

2.3.3 查看集合中的数据

127.0.0.1:6379> SMEMBERS set1
1) "v1"
2) "v2"
3) "v3"
4) "v4"

2.3.4 删除集合中的元素

127.0.0.1:6379> SREM set1 v3 v2
(integer) 2
127.0.0.1:6379> SMEMBERS set1
1) "v1"
2) "v4"

2.3.5 集合间操作

127.0.0.1:6379> SMEMBERS set2
1) "v4"
2) "v3"
3) "v2"
127.0.0.1:6379> SMEMBERS set1
1) "v1"
2) "v4"

# 获取集合的交集 SINTER
127.0.0.1:6379> SINTER set1 set2
1) "v4"

# 获取集合的并集 SUNION
127.0.0.1:6379> SUNION set1 set2
1) "v1"
2) "v4"
3) "v3"
4) "v2"

# 获取集合的差集 SDIFF
## 属于set1而不属于set2的元素
127.0.0.1:6379> SDIFF set1 set2
1) "v1"
## 属于set2而不属于set1的元素
127.0.0.1:6379> SDIFF set2 set1
1) "v3"
2) "v2"

2.4 有序集合 sorted set

Redis 有序集合和集合一样也是 String 类型的元素的集合,且不允许重复的成员,不同的是每个元素都会关联一个 double(双精度浮点型)类型的分数,Redis 正是通过该分数来为集合中的成员进行从小到大的排序,有序集合的成员是唯一的,但分数(score)却可以重复。有序集合通过跳跃表(skiplist)和哈希表组合实现,按成员查找分数的复杂度为 O(1),添加、删除、按排名范围查找的复杂度为 O(log N),集合中最大的成员数为 2^32 -1(4,294,967,295,每个集合可存储40多亿个成员),经常用于排行榜场景。

有序集合特点:

  • 有序
  • 无重复元素
  • 每个元素由score和value组成
  • score 可以重复
  • value 不可以重复

2.4.1 生成有序集合

127.0.0.1:6379> ZADD zset1 1 v1
(integer) 1
127.0.0.1:6379> ZADD zset1 2 v2
(integer) 1
127.0.0.1:6379> ZADD zset1 2 v3  # 分数可以重复,元素值不可以重复
(integer) 1
127.0.0.1:6379> ZADD zset1 3 v4
(integer) 1
127.0.0.1:6379> TYPE zset1
zset

# 一次性生成多个数据
127.0.0.1:6379> ZADD zset2 1 v1 2 v2 3 v3 4 v4
(integer) 4

2.4.2 有序集合实现排行榜

127.0.0.1:6379> ZADD sort 90 nezha 199 zhanlang 60 zhuluoji 30 xiaoyangxiaoen
(integer) 4
127.0.0.1:6379> ZRANGE sort 0 -1 # 正序排序,按照score从小到大
1) "xiaoyangxiaoen"
2) "zhuluoji"
3) "nezha"
4) "zhanlang"
127.0.0.1:6379> ZREVRANGE sort 0 -1  # 倒序排序,按照score从大到小排
1) "zhanlang"
2) "nezha"
3) "zhuluoji"
4) "xiaoyangxiaoen"
127.0.0.1:6379> ZRANGE sort 0 -1 WITHSCORES  # 正序显示并显示得分情况
1) "xiaoyangxiaoen"
2) "30"
3) "zhuluoji"
4) "60"
5) "nezha"
6) "90"
7) "zhanlang"
8) "199"

2.4.3 获取集合的个数

127.0.0.1:6379> ZCARD sort
(integer) 4
127.0.0.1:6379> ZCARD zset1
(integer) 4
127.0.0.1:6379> ZCARD zset2
(integer) 4

2.4.4 基于索引返回数值

127.0.0.1:6379> ZRANGE sort 0 -1
1) "xiaoyangxiaoen"
2) "zhuluoji"
3) "nezha"
4) "zhanlang"

2.4.5 返回某个数值的索引

127.0.0.1:6379> ZRANK sort nezha WITHSCORE
1) (integer) 2
2) "90"
127.0.0.1:6379> ZRANK sort xiaoyangxiaoen WITHSCORE
1) (integer) 0
2) "30"

2.4.6 获取分数

127.0.0.1:6379> ZSCORE sort zhuluoji
"60"
127.0.0.1:6379> ZSCORE sort nezha
"90"

2.4.7 删除元素

127.0.0.1:6379> ZRANGE sort 0 -1
1) "xiaoyangxiaoen"
2) "zhuluoji"
3) "nezha"
4) "zhanlang"
127.0.0.1:6379> ZREM sort zhuluoji nezha
(integer) 2
127.0.0.1:6379> ZRANGE sort 0 -1
1) "xiaoyangxiaoen"
2) "zhanlang"

2.5 哈希 hash

hash 是一个 string 类型的字段(field)和值(value)的映射表,Redis 中每个 hash 可以存储 2^32 -1 个键值对,类似于字典,存放了多个 k/v 对,特别适用于存储对象场景

2.5.1 生成 hash key

HSET hash field value

如果给定的哈希表不存在,那么一个新的哈希表将被创建并执行 HSET 操作
如果域 field 已经存在哈希表中,那么它的旧值将被新值 value 覆盖

127.0.0.1:6379> HSET ID name Tom age 15
(integer) 2
127.0.0.1:6379> TYPE ID
hash

# 查看所有数据
127.0.0.1:6379> HGETALL ID
1) "name"
2) "Tom"
3) "age"
4) "15"
# 增加数据
127.0.0.1:6379> HSET ID gender male
(integer) 1
127.0.0.1:6379> HGETALL ID
1) "name"
2) "Tom"
3) "age"
4) "15"
5) "gender"
6) "male"

# 同名列值被替换
127.0.0.1:6379> HSET ID age 18
(integer) 0
127.0.0.1:6379> HGETALL ID
1) "name"
2) "Tom"
3) "age"
4) "18"
5) "gender"
6) "male"

2.5.2 删除一个 hash key 的对应字段

127.0.0.1:6379> HGETALL ID
1) "name"
2) "Tom"
3) "age"
4) "18"
5) "gender"
6) "male"

# 删除gender和age字段
127.0.0.1:6379> HDEL ID gender age
(integer) 2
127.0.0.1:6379> HGETALL ID
1) "name"
2) "Tom"

2.5.3 批量设置 hash key 的多个 field 和 value

127.0.0.1:6379> HMSET ID name Jerry age 8 city US
OK
127.0.0.1:6379> HGETALL ID
1) "name"
2) "Jerry"
3) "age"
4) "8"
5) "city"
6) "US"

# 获取指定字段的值
127.0.0.1:6379> HMGET ID name city
1) "Jerry"
2) "US"

# 获取所有字段名 field
127.0.0.1:6379> HKEYS ID
1) "name"
2) "age"
3) "city"

# 获取field对应的value
127.0.0.1:6379> HVALS ID
1) "Jerry"
2) "8"
3) "US"

2.5.4 删除 hash

127.0.0.1:6379> DEL ID
(integer) 1
127.0.0.1:6379> HGETALL ID
(empty array)
127.0.0.1:6379> EXISTS ID
(integer) 0

三、掌握redis.conf的基本配置

# 服务器监听地址,多个地址之间用空格隔开
bind 0.0.0.0

# 如果开启安全模式并监听在0.0.0.0,必须要设置密码,否则无法远程连接
protected-mode yes

# 监听端口
port 6379

# Redis server端在收到客户端的ack之前的队列长度
tcp-backlog 511

# socket 文件的路径和权限
unixsocket /run/redis.sock
unixsocketperm 700

# 会话超时时间,多少秒后Redis会和客户端断开连接,为0表示永不超时
timeout 0

# 服务器端每隔多少秒向客户端发送tcp_ack包来探测客户端是否存活
tcp-keepalive 300

# 开启守护进程,会生成一个pid文件
daemonize yes

# 设置通过upstart和systemd管理Redis守护进程,auto为系统自动判断
supervised auto


# pid文件的路径
pidfile /apps/redis/run/redis_6379.pid

# 日志级别
loglevel notice

# 日志存放路径
logfile "/apps/redis/logs/redis_6379.log"

# 默认为redis分配16个数据库,编号0-15
databases 16

#  是否在Redis启动时在他的日志中显示Redis的logo
always-show-logo no

# 是否显示进程标题信息
set-proc-title yes

# 输出的进程格式
proc-title-template "{title} {listen-addr} {server-mode}"

# 设置字符串比较所使用的排序规则(locale),主要影响 Lua 脚本中的字符串比较,留空表示使用 libc 默认值
locale-collate ""

# Unless specified otherwise, by default Redis will save the DB:
#   * After 3600 seconds (an hour) if at least 1 change was performed
#   * After 300 seconds (5 minutes) if at least 100 changes were performed
#   * After 60 seconds if at least 10000 changes were performed
save 3600 1 300 100 60 10000

# 快照发生错误时,是否停止写入,建议设置为no
stop-writes-on-bgsave-error no

# 生成的RDB文件是否压缩
rdbcompression yes

# 是否对RDB文件做校验完整性
rdbchecksum yes

# 生成的rdb文件名称
dbfilename dump.rdb

# 在没有开启数据持久化的情况下,是否删除复制中使用的RDB文件,建议为no,不删除
rdb-del-sync-files no

# 快照文件的存放目录
dir "/apps/redis/data/"

# master的IP及端口
# replicaof <masterip> <masterport>

# master的密码
# masterauth <master-password>

# 配置master的用户名
# masteruser <username>

# 当主库与从库断开连接或主从复制正在进行,从库是否进行数据的提供
# 为 yes,从库继续响应客户端的读请求
# 为 no,除去指定命令之外的任何请求都会返回一个错误"SYNC with master in progress"
replica-serve-stale-data yes

# 从库是否只读(yes,若为从库则此库为只读)
replica-read-only yes

# 是否使用socket方式复制数据(无盘同步),新slave连接时需要做全量同步,Redis server 需要从内存dump出新的RDB文件,然后从master传给slave,传送有两种方式
# 基于硬盘(disk-backed): master 创建一个新进程进行 RDB dump,dump 完成后由父进程传给从节点
# 基于socket(diskless): master 创建子进程直接dump RDB到slave的socket,不经过主进程和硬盘
repl-diskless-sync yes

# 磁盘I/O较慢并且网络较快,可以使用diskless;如果都很快,可以用磁盘

# 无盘复制的RDB快照延迟传输时间,在此时间内slave共用同一个RDB快照文件
repl-diskless-sync-delay 5

# 延迟时间内无盘复制的最大slave数量,0为不限制
repl-diskless-sync-max-replicas 0

# slave 是否使用无盘加载
# disabled: slave 不使用无盘加载,先将RDB文件存储到磁盘,等文件传输成功后再加载
# swapdb: 解析时在RAM中保留当前rdb内容的副本,直接从套接字获取数据,需要在内存中保存rdb快照,然后将快照还原为数据保存到内存,需要内存够用,否则会发生OOM
# on-empty-db: 仅在当前slave数据集为空时使用无盘加载
repl-diskless-load disabled

# slave根据master指定的时间进行周期性的ping检测
repl-ping-replica-period 10

# 复制连接的超时时间,需要大于上一个选项设置的值,否则会经常报超时
repl-timeout 60

# 在socket模式下是否在slave套接字发送SYNC之后禁用TCP_NODELAY
# 设置为yes,即开启延迟,Redis将会延迟40ms并合并在此时间内产生的报文,那么Redis将会使用较少的TCP包和带宽来向slaves发送数据,但是这将使数据传输到slave上有延迟,Linux默认配置会达到40ms;如果为no,即关闭延迟,数据传输到slave的延迟将会减少但要使用更多的带宽(有额外的TCP报文包头封装)
repl-disable-tcp-nodelay no

# 复制缓冲区内存大小,只有在slave连接之后才分配内存
repl-backlog-size 512mb

# 多长时间master没有slave连接,就清空backlog缓冲区
repl-backlog-ttl 600

# 当master不可用时,sentinel会根据slave的优先级选举一个master,值越小优先级越高,设置为0将永远不会被选举
replica-priority 100

# 从master同步数据或从AOF读取数据时可能会出错,也可能造成数据不一致,默认设置ignore,忽略这些错误并继续执行命令;设置为panic则遇到错误将不会继续执行
propagation-error-behavior ignore

# 当slave无法从master接收到的命令持久化到磁盘时,那么slave与master的同步将崩溃,需要通过集群监控及时发现集群同步异常并修复,为了保证集群数据的一致性,不建议修改默认值,但为了兼容老版本Redis可以设置为yes,这只会记录一个告警日志并执行收到的写命令(会导致master与slave的数据不一致)
replica-ignore-disk-write-errors no

# 设置是否公布当前节点,默认情况下哨兵会公布全部节点,一个未公布的副本会被 "SENTINEL REPLICAS <master>" 命令忽略,不会暴露给哨兵的客户端,相当于隐藏当前节点
replica-announced yes

# 当前服务器至少3个已连接的slave服务器,并且与主服务器最后一次成功通讯的间隔小于10秒钟的情况下,主服务器才执行写命令,否则主服务器拒绝新的数据(只读)。默认是3个,如果是单机不要开启,会导致无法写入数据,报错(NOREPLICAS Not enough good replicas to write)
# min-replicas-to-write 1
# min-replicas-max-lag 10

# 将当前slave的明确IP和端口号暴露给master,非必须
# replica-announce-ip 5.5.5.5
# replica-announce-port 1234

# key 失效管理,Redis通过失效表记录客户端访问了哪个key是失效的,并将失效的消息发送给客户端,0为不限制,非必须选项
# tracking-table-max-keys 1000000

################################## SECURITY ###################################

# Warning: since Redis is pretty fast, an outside user can try up to
# 1 million passwords per second against a modern box. This means that you
# should use very strong passwords, otherwise they will be very easy to break.
# Note that because the password is really a shared secret between the client
# and the server, and should not be memorized by any human, the password
# can be easily a long string from /dev/urandom or whatever, so by using a
# long and unguessable password no brute force attack will be possible.

# Redis ACL users are defined in the following format:
#
#   user <username> ... acl rules ...
#
# For example:
#
#   user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
#
# The special username "default" is used for new connections. If this user
# has the "nopass" rule, then new connections will be immediately authenticated
# as the "default" user without the need of any password provided via the
# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
# the connections will start in not authenticated state, and will require
# AUTH (or the HELLO command AUTH option) in order to be authenticated and
# start to work.
#
# The ACL rules that describe what a user can do are the following:
#
#  on           Enable the user: it is possible to authenticate as this user.
#  off          Disable the user: it's no longer possible to authenticate
#               with this user, however the already authenticated connections
#               will still work.
#  skip-sanitize-payload    RESTORE dump-payload sanitization is skipped.
#  sanitize-payload         RESTORE dump-payload is sanitized (default).
#  +<command>   Allow the execution of that command.
#               May be used with `|` for allowing subcommands (e.g "+config|get")
#  -<command>   Disallow the execution of that command.
#               May be used with `|` for blocking subcommands (e.g "-config|set")
#  +@<category> Allow the execution of all the commands in such category
#               with valid categories are like @admin, @set, @sortedset, ...
#               and so forth, see the full list in the server.c file where
#               the Redis command table is described and defined.
#               The special category @all means all the commands, but currently
#               present in the server, and that will be loaded in the future
#               via modules.
#  +<command>|first-arg  Allow a specific first argument of an otherwise
#                        disabled command. It is only supported on commands with
#                        no sub-commands, and is not allowed as negative form
#                        like -SELECT|1, only additive starting with "+". This
#                        feature is deprecated and may be removed in the future.
#  allcommands  Alias for +@all. Note that it implies the ability to execute
#               all the future commands loaded via the modules system.
#  nocommands   Alias for -@all.
#  ~<pattern>   Add a pattern of keys that can be mentioned as part of
#               commands. For instance ~* allows all the keys. The pattern
#               is a glob-style pattern like the one of KEYS.
#               It is possible to specify multiple patterns.
# %R~<pattern>  Add key read pattern that specifies which keys can be read 
#               from.
# %W~<pattern>  Add key write pattern that specifies which keys can be
#               written to. 
#  allkeys      Alias for ~*
#  resetkeys    Flush the list of allowed keys patterns.
#  &<pattern>   Add a glob-style pattern of Pub/Sub channels that can be
#               accessed by the user. It is possible to specify multiple channel
#               patterns.
#  allchannels  Alias for &*
#  resetchannels            Flush the list of allowed channel patterns.
#  ><password>  Add this password to the list of valid password for the user.
#               For example >mypass will add "mypass" to the list.
#               This directive clears the "nopass" flag (see later).
#  <<password>  Remove this password from the list of valid passwords.
#  nopass       All the set passwords of the user are removed, and the user
#               is flagged as requiring no password: it means that every
#               password will work against this user. If this directive is
#               used for the default user, every new connection will be
#               immediately authenticated with the default user without
#               any explicit AUTH command required. Note that the "resetpass"
#               directive will clear this condition.
#  resetpass    Flush the list of allowed passwords. Moreover removes the
#               "nopass" status. After "resetpass" the user has no associated
#               passwords and there is no way to authenticate without adding
#               some password (or setting it as "nopass" later).
#  reset        Performs the following actions: resetpass, resetkeys, resetchannels,
#               allchannels (if acl-pubsub-default is set), off, clearselectors, -@all.
#               The user returns to the same state it has immediately after its creation.
# (<options>)   Create a new selector with the options specified within the
#               parentheses and attach it to the user. Each option should be 
#               space separated. The first character must be ( and the last 
#               character must be ).
# clearselectors            Remove all of the currently attached selectors. 
#                           Note this does not change the "root" user permissions,
#                           which are the permissions directly applied onto the
#                           user (outside the parentheses).
#
# ACL rules can be specified in any order: for instance you can start with
# passwords, then flags, or key patterns. However note that the additive
# and subtractive rules will CHANGE MEANING depending on the ordering.
# For instance see the following example:
#
#   user alice on +@all -DEBUG ~* >somepassword
#
# This will allow "alice" to use all the commands with the exception of the
# DEBUG command, since +@all added all the commands to the set of the commands
# alice can use, and later DEBUG was removed. However if we invert the order
# of two ACL rules the result will be different:
#
#   user alice on -DEBUG +@all ~* >somepassword
#
# Now DEBUG was removed when alice had yet no commands in the set of allowed
# commands, later all the commands are added, so the user will be able to
# execute everything.
#
# Basically ACL rules are processed left-to-right.
#
# The following is a list of command categories and their meanings:
# * keyspace - Writing or reading from keys, databases, or their metadata 
#     in a type agnostic way. Includes DEL, RESTORE, DUMP, RENAME, EXISTS, DBSIZE,
#     KEYS, EXPIRE, TTL, FLUSHALL, etc. Commands that may modify the keyspace,
#     key or metadata will also have `write` category. Commands that only read
#     the keyspace, key or metadata will have the `read` category.
# * read - Reading from keys (values or metadata). Note that commands that don't
#     interact with keys, will not have either `read` or `write`.
# * write - Writing to keys (values or metadata)
# * admin - Administrative commands. Normal applications will never need to use
#     these. Includes REPLICAOF, CONFIG, DEBUG, SAVE, MONITOR, ACL, SHUTDOWN, etc.
# * dangerous - Potentially dangerous (each should be considered with care for
#     various reasons). This includes FLUSHALL, MIGRATE, RESTORE, SORT, KEYS,
#     CLIENT, DEBUG, INFO, CONFIG, SAVE, REPLICAOF, etc.
# * connection - Commands affecting the connection or other connections.
#     This includes AUTH, SELECT, COMMAND, CLIENT, ECHO, PING, etc.
# * blocking - Potentially blocking the connection until released by another
#     command.
# * fast - Fast O(1) commands. May loop on the number of arguments, but not the
#     number of elements in the key.
# * slow - All commands that are not Fast.
# * pubsub - PUBLISH / SUBSCRIBE related
# * transaction - WATCH / MULTI / EXEC related commands.
# * scripting - Scripting related.
# * set - Data type: sets related.
# * sortedset - Data type: zsets related.
# * list - Data type: lists related.
# * hash - Data type: hashes related.
# * string - Data type: strings related.
# * bitmap - Data type: bitmaps related.
# * hyperloglog - Data type: hyperloglog related.
# * geo - Data type: geo related.
# * stream - Data type: streams related.
#
# For more information about ACL configuration please refer to
# the Redis web site at https://redis.io/topics/acl

# ACL LOG
#
# The ACL Log tracks failed commands and authentication events associated
# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
acllog-max-len 128

# Using an external ACL file
#
# Instead of configuring users here in this file, it is possible to use
# a stand-alone file just listing users. The two methods cannot be mixed:
# if you configure users here and at the same time you activate the external
# ACL file, the server will refuse to start.
#
# The format of the external ACL user file is exactly the same as the
# format that is used inside redis.conf to describe users.
#
# aclfile /etc/redis/users.acl

# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
# layer on top of the new ACL system. The option effect will be just setting
# the password for the default user. Clients will still authenticate using
# AUTH <password> as usually, or more explicitly with AUTH default <password>
# if they follow the new protocol: both will work.
#
# The requirepass is not compatible with aclfile option and the ACL LOAD
# command, these will cause requirepass to be ignored.
#
# requirepass foobared
requirepass 123456
# New users are initialized with restrictive permissions by default, via the
# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
# is possible to manage access to Pub/Sub channels with ACL rules as well. The
# default Pub/Sub channels permission of new users is controlled by the
# acl-pubsub-default configuration directive, which accepts one of these values:
#
# allchannels: grants access to all Pub/Sub channels
# resetchannels: revokes access to all Pub/Sub channels
#
# From Redis 7.0, acl-pubsub-default defaults to 'resetchannels' permission.
#
# acl-pubsub-default resetchannels

# Command renaming (DEPRECATED).
#
# ------------------------------------------------------------------------
# WARNING: avoid using this option if possible. Instead use ACLs to remove
# commands from the default user, and put them only in some admin user you
# create for administrative purposes.
# ------------------------------------------------------------------------
#
# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# hard to guess so that it will still be available for internal-use tools
# but not available for general clients.
#
# Example:
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
# It is also possible to completely kill a command by renaming it into
# an empty string:
#
# rename-command CONFIG ""
#
# Please note that changing the name of commands that are logged into the
# AOF file or transmitted to replicas may cause problems.


# 最大客户端连接数,默认为10000个
# maxclients 10000

# 最大内存空间使用,默认无限制,要预留一部分内存空间给内核及Redis缓冲区用
# maxmemory <bytes>

# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
# is reached. You can select one from the following behaviors:
#
# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
# allkeys-lru -> Evict any key using approximated LRU.
# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
# allkeys-lfu -> Evict any key using approximated LFU.
# volatile-random -> Remove a random key having an expire set.
# allkeys-random -> Remove a random key, any key.
# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
# noeviction -> Don't evict anything, just return an error on write operations.
#
# LRU means Least Recently Used
# LFU means Least Frequently Used
#
# Both LRU, LFU and volatile-ttl are implemented using approximated
# randomized algorithms.
#
# Note: with any of the above policies, when there are no suitable keys for
# eviction, Redis will return an error on write operations that require
# more memory. These are usually commands that create new keys, add data or
# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE,
# SORT (due to the STORE argument), and EXEC (if the transaction includes any
# command that requires memory).
#
# The default is:
# 内存使用完后的默认策略
# maxmemory-policy noeviction

# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
# algorithms (in order to save memory), so you can tune it for speed or
# accuracy. By default Redis will check five keys and pick the one that was
# used least recently, you can change the sample size using the following
# configuration directive.
#
# The default of 5 produces good enough results. 10 Approximates very closely
# true LRU but costs more CPU. 3 is faster but not very accurate.
#
# maxmemory-samples 5

# Eviction processing is designed to function well with the default setting.
# If there is an unusually large amount of write traffic, this value may need to
# be increased.  Decreasing this value may reduce latency at the risk of
# eviction processing effectiveness
#   0 = minimum latency, 10 = default, 100 = process without regard to latency
#
# maxmemory-eviction-tenacity 10

# 从Redis5开始,slave角色忽略最大内存的配置,而是以master为准
# replica-ignore-maxmemory yes

# 清理过期key的CPU的分配比例,范围1~10,10会分配较多CPU去清理过期的key,但是清理时间短
# active-expire-effort 1

# 是否开启惰性数据删除即延迟数据删除(内存空间回收),在删除数据的时候提供异步延时释放键值的功能,把键值释放操作放在BIO(Background I/O)单独的子线程中处理,以减少删除对Redis主线程的阻塞,可以有效的避免big key时带来的性能和可用性问题,生产可以开启,提高Redis主线程的执行效率
## lazyfree-lazy-eviction: 表示当Redis运行内存超过maxmemory,是否开启lazy free机制删除
## lazyfree-lazy-expire: 表示设置了过期时间的键值在过期之后是否开启lazy free机制删除
## lazyfree-lazy-server-del: 有些指令在处理已经存在的键时,会带有一个隐式的del键操作,比如rename命令,当目标键已存在,Redis会先删除目标键,如果这些目标键是一个big key,就会造成阻塞删除的问题,此选项表示在这种场景下是否开启lazy free机制删除
## replica-lazy-flush: 针对slave进行全量数据同步,slave在加载master的RDB文件前,会运行flushall来清理自己的数据,它表示此时是否开启lazy free机制删除
lazyfree-lazy-eviction yes
lazyfree-lazy-expire yes
lazyfree-lazy-server-del yes
replica-lazy-flush no

# 是否将del设置为类似于UNLINK的删除功能,yes表示DEL等同于UNLINK即非阻塞删除,no为阻塞删除数据
lazyfree-lazy-user-del yes

# 是否设置清空DB的非阻塞删除,no为阻塞,yes为非阻塞
lazyfree-lazy-user-flush yes

# 是否开启多线程,处理网络I/O请求,数据读写还是单线程,8个以上对性能提升不大,4核心CPU可以设置2~3个Redis线程,8核CPU可以设置6个Redis线程
# io-threads 4

# 是否开启I/O多线程,和前一个选项配合使用,默认不开启
# io-threads-do-reads no

# 是否开启oom-score-adj设置
## no: 不做任何修改(默认值)
## yes: 是下面 relative 选项的别名
## absolute: oom-score-adj-values配置的值将写入内核
## relative: 当服务启动时,使用相对于oom_score_adj初始值的值,然后将其限制在-1000到1000的范围内
oom-score-adj no

# 分别控制Redis主进程、从进程和后台子进程的值
oom-score-adj-values 0 200 800

# 关闭透明大页
disable-thp yes

# 是否开启AOF机制,默认不开启,yes 是开启
appendonly yes

# AOF文件的名称
appendfilename "appendonly.aof"

# AOF文件所在的目录名称,会在定义的 dir 参数下创建一个该目录名称
appenddirname "appendonly"


# AOF的持久化策略配置,no表示不执行fsync,由操作系统保证数据同步到磁盘;always表示每次写入都执行fsync;everysec表示每秒执行一次fsync,可能会丢失自上次保存后1s的数据
# appendfsync always
appendfsync everysec
# appendfsync no

# 在AOF rewrite期间,是否对AOF新记录的append暂缓使用文件同步策略,主要考虑磁盘I/O开支和请求阻塞时间。默认为no,表示“不暂缓”,新的AOF记录仍然会被立即同步,Linux的默认fsync策略是30s,如果为yes,可能丢失30s数据,但由于yes性能较好而且会避免出现阻塞因此比较推荐
no-appendfsync-on-rewrite no

# 当AOF log增长超过指定的百分比时,重写AOF文件,设置为0表示不自动重写AOF日志,
auto-aof-rewrite-percentage 100
# 触发AOF REWRITE的原始最小文件大小
auto-aof-rewrite-min-size 64mb

# 是否加载由于其它异常原因导致的末尾异常的AOF文件(主进程被kill/断电等)
aof-load-truncated yes

# Redis4.0新增RDB-AOF混合持久化格式,在开启了这个功能之后,AOF重写产生的文件将同时包含RDB格式的内容和AOF格式的内容,其中RDB格式的内容用于记录已有的数据,而AOF格式的内容用于记录最近发生了变化的数据,这样Redis就可以同时拥有RDB持久化和AOF持久化的优点
aof-use-rdb-preamble yes

# Redis supports recording timestamp annotations in the AOF to support restoring
# the data from a specific point-in-time. However, using this capability changes
# the AOF format in a way that may not be compatible with existing AOF parsers.
aof-timestamp-enabled no

################################ SHUTDOWN #####################################

# 在与slave同步数据的时候,关闭的最大宽限期,可以等10秒钟让slave复制数据,避免数据的不一致
# shutdown-timeout 10

# When Redis receives a SIGINT or SIGTERM, shutdown is initiated and by default
# an RDB snapshot is written to disk in a blocking operation if save points are configured.
# The options used on signaled shutdown can include the following values:
# default:  Saves RDB snapshot only if save points are configured.
#           Waits for lagging replicas to catch up.
# save:     Forces a DB saving operation even if no save points are configured.
# nosave:   Prevents DB saving operation even if one or more save points are configured.
# now:      Skips waiting for lagging replicas.
# force:    Ignores any errors that would normally prevent the server from exiting.
#
# Any combination of values is allowed as long as "save" and "nosave" are not set simultaneously.
# Example: "nosave force now"
#
# shutdown-on-sigint default
# shutdown-on-sigterm default

################ NON-DETERMINISTIC LONG BLOCKING COMMANDS #####################

# 设置lua脚本、函数以及某些情况下的模块最长执行时长,以毫秒为单位,busy-reply-threshold等于lua-time-limit,是新增加的
# lua-time-limit 5000
# busy-reply-threshold 5000

################################ REDIS CLUSTER  ###############################

# 当需要配置RedisCluster时,需要开启此选项
# cluster-enabled yes

# Redis Cluster 的配置文件,配置文件由node节点自动生成并自动更新
# cluster-config-file nodes-6379.conf

# 集群中node节点的超时时间
# cluster-node-timeout 15000

# 集群通信端口,用于集群状态通信,默认是数据端口之上加10000
# cluster-port 0

# 在执行故障转移时由于某些节点和master断开一段时间导致数据较旧,这些节点就不适合选举master,超过这个时间就不会被进行故障转移
# cluster-replica-validity-factor 10

# 集群迁移屏障,一个主节点至少拥有的正常工作的从节点,即如果主节点的slave节点故障后会将多余的从节点分配到当前主节点成为新的从节点
# cluster-migration-barrier 1

# 迁移后master节点至少还有一个slave节点才能做后期的节点迁移;如master A有两个slave节点,当集群中出现孤儿master B(无从节点),A节点富余的从节点自动被迁移到master B节点作为B的slave;默认yes允许自动迁移
# cluster-allow-replica-migration yes

# 集群请求的槽位全部覆盖,如果一个主库宕机且没有备库就会出现集群槽位不全,yes表示redis集群槽位验证不全就不再对外提供服务;no则可以继续使用,但会出现数据查询不到的情况(会出现数据的丢失)
# cluster-require-full-coverage yes

# 默认配置为no,如果配置为yes时,在master宕机时,当前的slave不会做故障转移升为master,用于将特定节点永远设置为slave状态
# cluster-replica-no-failover no

# 是否允许集群部分主节点宕机,且无法自动选举的情况下,依然可以从其它节点中读取数据
# cluster-allow-reads-when-down no

# 是否允许集群部分主节点宕机且无法自动选举的情况下,依然可以使用pub/sub(发布者/订阅者)
# cluster-allow-pubsubshard-when-down yes

# 设置集群客户端连接的发送字节缓冲区的内存使用限制,超过限制缓冲区将被清空,0是不限制
# 默认禁用,可配置client-query-buffer-limit的值使用(默认为1Gb),那么此处也可设置为1gb
# cluster-link-sendbuf-limit 0
 
# 配置集群使用主机名公布当前节点信息,默认使用主机IP,使用场景为配置TLS证书时
# cluster-announce-hostname ""

# 配置用于日志中调试的节点的唯一标识符
# cluster-announce-human-nodename ""

# 集群首选的端点类型
# cluster-preferred-endpoint-type ip

# In order to setup your cluster make sure to read the documentation
# available at https://redis.io web site.

################################## SLOW LOG ###################################

# 以微秒为单位的慢日志记录,为负数会禁用慢日志,为0会记录每个命令操作,一般不用开
slowlog-log-slower-than -1
# slowlog-log-slower-than 1000000

# 记录多少条慢日志保存在队列中,超出后会删除最早的,以此滚动删除,使用slowlog get查看慢日志
slowlog-max-len 128

################################ LATENCY MONITOR ##############################
# 是否开启延迟监控,用于redis响应慢的时候做性能分析,单位为毫秒,0为关闭
latency-monitor-threshold 0

################################ LATENCY TRACKING ##############################

# 启用命令监控后,通过 INFO latencystats 命令导出百分比分布
# latency-tracking-info-percentiles 50 99 99.9


############################### ADVANCED CONFIG ###############################
# 在Redis 7.0版本散列表数据类型有两种数据结构保存数据,分别为散列表和 listpack。当数据量很小时,可以使用更高效的数据结构存储,从而达到在不影响性能的情况下节省内存
## listpack(紧凑列表)中,哈希对象保存的键值对数量最大值
hash-max-listpack-entries 512
## listpack 中,哈希对象所有键值对的键和值的字符串长度的字节数都小于hash-max-listpack-value的值
hash-max-listpack-value 64

# 当 list-max-listpack-size 为负数时表示限制每个 quicklistNode 的 ziplist 的内存大小,超过这个大小就会使用 linkedlist 存储数据,每个值有以下含义
## -5: 每个 quicklist 节点上的 ziplist 大小最大 64 kb <--- 正常环境不推荐
## -4: 每个 quicklist 节点上的 ziplist 大小最大 32 kb <--- 正常环境不推荐
## -3: 每个 quicklist 节点上的 ziplist 大小最大 16 kb <--- 正常环境不推荐
## -2: 每个 quicklist 节点上的 ziplist 大小最大 8 kb <--- 正常环境不推荐
## -1: 每个 quicklist 节点上的 ziplist 大小最大 4 kb <--- 正常环境不推荐
# Positive numbers mean store up to _exactly_ that number of elements
# per list node.
# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
# but if your use case is unique, adjust the settings as necessary.
## 默认值为 -2,推荐使用此值
list-max-listpack-size -2

# Lists may also be compressed.
# Compress depth is the number of quicklist ziplist nodes from *each* side of
# the list to *exclude* from compression.  The head and tail of the list
# are always uncompressed for fast push/pop operations.  Settings are:
# 0: disable all list compression
# 1: depth 1 means "don't start compressing until after 1 node into the list,
#    going from either the head or tail"
#    So: [head]->node->node->...->node->[tail]
#    [head], [tail] will always be uncompressed; inner nodes will compress.
# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
#    2 here means: don't compress head or head->next or tail->prev or tail,
#    but compress all nodes between them.
# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
# etc.
# 压缩深度配置,用来配置Lists的压缩深度,默认是0表示不压缩
list-compress-depth 0

# Sets have a special encoding when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
# 整数集合内的元素最大值。长度不超过此值,Sets的底层会使用intset存储以节省内存,超过后转换为散列表存储
# 散列表Hash table,也叫哈希表,是根据Key而直接访问在内存储存位置的数据结构
set-max-intset-entries 512

# Sets containing non-integer values are also encoded using a memory efficient
# data structure when they have a small number of entries, and the biggest entry
# does not exceed a given threshold. These thresholds can be configured using
# the following directives.
# listpack(紧凑列表)中,无序集合保存的键值对数量最大值
set-max-listpack-entries 128
# listpack中,无序集合对所有键值对的键和值的字符串长度的字节数都小于set-max-listpack-value的值
set-max-listpack-value 64

# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
# listpack中,有序集合保存的键值对数量最大值
zset-max-listpack-entries 128
# listpack中,有序集合所有键值对的键和值的字符串长度的字节数都小于zset-max-listpack-value的值
zset-max-listpack-value 64

# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 bytes header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
# HyperLogLog 是一种高级数据结构,统计基数的利器。HyperLogLog 的存储结构分为密集型存储结构和稀疏存储结构两种,默认为稀疏存储结构,稀疏存储结构占用更小的内存
# 默认的配置如下,单位为Byte,超过该值后转换为密集存储结构,推荐的值是0~3000
hll-sparse-max-bytes 3000

# Streams macro node max size / items. The stream data structure is a radix
# tree of big nodes that encode multiple items inside. Using this configuration
# it is possible to configure how big a single node can be in bytes, and the
# maximum number of items it may contain before switching to a new node when
# appending new stream entries. If any of the following settings are set to
# zero, the limit is ignored, so for instance it is possible to set just a
# max entries limit by setting max-bytes to 0 and max-entries to the desired
# value.
# Stream是Redis 5.0版本新增的数据类型,每个节点中都存储着若干Stream条目,因此这些节点通常被称为宏节点
# 单位是Byte,设置每个宏节点占用的内存上限,0表示无限制
stream-node-max-bytes 4096
# 用于设定每个宏节点存储元素个数,默认值为100, 0表示无限制,当一个宏节点存储的Stream条目达到上限,新添加的条目会存储到新的宏节点中
stream-node-max-entries 100

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
# 设置成yes后,Redis将每100ms使用1ms的CPU时间来对Redis的hash表重新hash,可降低内存的使用
activerehashing yes

# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# replica -> replica clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reach 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously overcomes
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can read.
#
# Instead there is a default limit for pubsub and replica clients, since
# subscribers and replicas receive data in a push fashion.
#
# Note that it doesn't make sense to set the replica clients output buffer
# limit lower than the repl-backlog-size config (partial sync will succeed
# and then replica will get disconnected).
# Such a configuration is ignored (the size of repl-backlog-size will be used).
# This doesn't have memory consumption implications since the replica client
# will share the backlog buffers memory.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
# 用于强制断开客户端的连接。当客户端没有及时把缓冲区的数据读取完毕,Redis会认为这个客户端可能已经异常
## 分成了三种不同类型的客户端
# normal(普通),普通客户端,包括MONITOR客户端
# replica(副本客户端),slave节点的客户端
# pubsub(发布订阅客户端),至少订阅了一个pubsub频道或者模式的客户端
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

# Client query buffers accumulate new commands. They are limited to a fixed
# amount by default in order to avoid that a protocol desynchronization (for
# instance due to a bug in the client) will lead to unbound memory usage in
# the query buffer. However you can configure it here if you have very special
# needs, such us huge multi/exec requests or alike.
# 客户端 query buffer(查询缓冲区或输入缓冲区),用于保存客户端发送命令,Redis Server从query buffer获取命令并执行
# 如果程序的 key 设计不合理,客户端使用大量的 query buffer,导致 Redis 很容易达到maxmeory限制,最好限制在一个固定的大小来避免占用过大内存的问题
# client-query-buffer-limit 1gb

# In some scenarios client connections can hog up memory leading to OOM
# errors or data eviction. To avoid this we can cap the accumulated memory
# used by all client connections (all pubsub and normal clients). Once we
# reach that limit connections will be dropped by the server freeing up
# memory. The server will attempt to drop the connections using the most 
# memory first. We call this mechanism "client eviction".
#
# Client eviction is configured using the maxmemory-clients setting as follows:
# 0 - client eviction is disabled (default)
#
# A memory value can be used for the client eviction threshold,
# for example:
# 7.0版本新特性,每个与服务器建立连接的客户端都会占用内存(查询缓冲区、输出缓冲区和其他缓冲区)
# 大量的客户端可能会占用过大内存导致OOM,为了避免这个情况,Redis可以基于(Client Eviction)客户端驱逐机制用于限制内存占用
# 限制所有客户端占用内存总和
# maxmemory-clients 1g
#
# A percentage value (between 1% and 100%) means the client eviction threshold
# is based on a percentage of the maxmemory setting. For example to set client
# eviction at 5% of maxmemory:
# 表示如果maxmemory-clients的内存不足,那么占用内存达到5%的客户端将被驱逐
# maxmemory-clients 5%

# In the Redis protocol, bulk requests, that are, elements representing single
# strings, are normally limited to 512 mb. However you can change this limit
# here, but must be 1mb or greater
# 在Redis协议中,单个大体积的字符串请求,最大限制为512mb,可以调大
# proto-max-bulk-len 512mb

# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
# Redis在后台调用一些函数来执行很多后台任务,比如关闭超时连接,清理不在被请求的过期的key,rehash,执行RDB内存快照和AOF持久化等操作,可以使用hz参数来决定执行这些任务的频率
# 默认是每秒10次
hz 10

# Normally it is useful to have an HZ value which is proportional to the
# number of clients connected. This is useful in order, for instance, to
# avoid too many clients are processed for each background task invocation
# in order to avoid latency spikes.
#
# Since the default HZ value by default is conservatively set to 10, Redis
# offers, and enables by default, the ability to use an adaptive HZ value
# which will temporarily raise when there are many connected clients.
#
# When dynamic HZ is enabled, the actual configured HZ will be used
# as a baseline, but multiples of the configured HZ value will be actually
# used as needed once more clients are connected. In this way an idle
# instance will use very little CPU time while a busy instance will be
# more responsive.
# Redis会根据上一个参数定义的值来调整后台任务
dynamic-hz yes

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 4 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
# 当子进程进行AOF重写时,如果配置成 aof-rewrite-incremental-fsync yes,每生成4MB数据就执行一次fsync操作,可以将一个较大的文件分批提交到硬盘来避免瞬间IO,推荐开启
aof-rewrite-incremental-fsync yes

# When redis saves RDB file, if the following option is enabled
# the file will be fsync-ed every 4 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
rdb-save-incremental-fsync yes

# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
# idea to start with the default settings and only change them after investigating
# how to improve the performances and how the keys LFU change over time, which
# is possible to inspect via the OBJECT FREQ command.
#
# There are two tunable parameters in the Redis LFU implementation: the
# counter logarithm factor and the counter decay time. It is important to
# understand what the two parameters mean before changing them.
#
# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis
# uses a probabilistic increment with logarithmic behavior. Given the value
# of the old counter, when a key is accessed, the counter is incremented in
# this way:
#
# 1. A random number R between 0 and 1 is extracted.
# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
# 3. The counter is incremented only if R < P.
#
# The default lfu-log-factor is 10. This is a table of how the frequency
# counter changes with a different number of accesses with different
# logarithmic factors:
#
# +--------+------------+------------+------------+------------+------------+
# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
# +--------+------------+------------+------------+------------+------------+
# | 0      | 104        | 255        | 255        | 255        | 255        |
# +--------+------------+------------+------------+------------+------------+
# | 1      | 18         | 49         | 255        | 255        | 255        |
# +--------+------------+------------+------------+------------+------------+
# | 10     | 10         | 18         | 142        | 255        | 255        |
# +--------+------------+------------+------------+------------+------------+
# | 100    | 8          | 11         | 49         | 143        | 255        |
# +--------+------------+------------+------------+------------+------------+
#
# NOTE: The above table was obtained by running the following commands:
#
#   redis-benchmark -n 1000000 incr foo
#   redis-cli object freq foo
#
# NOTE 2: The counter initial value is 5 in order to give new objects a chance
# to accumulate hits.
#
# The counter decay time is the time, in minutes, that must elapse in order
# for the key counter to be decremented.
#
# The default value for the lfu-decay-time is 1. A special value of 0 means we
# will never decay the counter.
# 用于调整数据的访问增长率,lfu-log-factor的值越大,counter增长的越慢
# lfu-log-factor 10
# 数据的访问统计计数的衰减由lfu-decay-time(单位为分钟)来控制,如果值是1分钟,则N分钟没有访问计数就减少N次
# lfu-decay-time 1

########################### ACTIVE DEFRAGMENTATION #######################
# 在线内存碎片整理
# What is active defragmentation?
# -------------------------------
# 在线内存碎片整理指的是自动压缩内存分配器分配和Redis频繁做更新操作、删除大量过期数据,释放的空间不够连续而无法复用的内存空间
# 通常来说碎片化达到一定程度,Redis会使用Jemalloc的特性创建连续的内存空间,并在此内存空间对现有的值进行拷贝,拷贝完成后会释放掉旧的数据。这个过程会对所有的导致碎片化的key以增量的形式进行。
# Active (online) defragmentation allows a Redis server to compact the
# spaces left between small allocations and deallocations of data in memory,
# thus allowing to reclaim back memory.
#
# Fragmentation is a natural process that happens with every allocator (but
# less so with Jemalloc, fortunately) and certain workloads. Normally a server
# restart is needed in order to lower the fragmentation, or at least to flush
# away all the data and create it again. However thanks to this feature
# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
# in a "hot" way, while the server is running.
#
# Basically when the fragmentation is over a certain level (see the
# configuration options below) Redis will start to create new copies of the
# values in contiguous memory regions by exploiting certain specific Jemalloc
# features (in order to understand if an allocation is causing fragmentation
# and to allocate it in a better place), and at the same time, will release the
# old copies of the data. This process, repeated incrementally for all the keys
# will cause the fragmentation to drop back to normal values.
#
# Important things to understand:
#
# 1. This feature is disabled by default, and only works if you compiled Redis
#    to use the copy of Jemalloc we ship with the source code of Redis.
#    This is the default with Linux builds.
#
# 2. You never need to enable this feature if you don't have fragmentation
#    issues.
#
# 3. Once you experience fragmentation, you can enable this feature when
#    needed with the command "CONFIG SET activedefrag yes".
#
# The configuration parameters are able to fine tune the behavior of the
# defragmentation process. If you are not sure about what they mean it is
# a good idea to leave the defaults untouched.

# Active defragmentation is disabled by default
# 是否启用内存在线整理
# activedefrag no

# Minimum amount of fragmentation waste to start active defrag
# 当Redis的数据量达到多大时进行碎片整理
# active-defrag-ignore-bytes 100mb

# Minimum percentage of fragmentation to start active defrag
# 当碎片达到10%时,执行数据整理
# active-defrag-threshold-lower 10

# Maximum percentage of fragmentation at which we use maximum effort
# 内存碎片超过100%后,尽最大努力整理
# active-defrag-threshold-upper 100

# Minimal effort for defrag in CPU percentage, to be used when the lower
# threshold is reached
# 自动清理过程中,占用 CPU 时间的比例不低于 1%,从而保证能正常展开清理任务。
# active-defrag-cycle-min 1

# Maximal effort for defrag in CPU percentage, to be used when the upper
# threshold is reached
# 自动清理过程占用的CPU时间比例不能超过25%,超过就立刻停止清理,避免对Redis造成阻塞,造成高延迟
# active-defrag-cycle-max 25

# Maximum number of set/hash/zset/list fields that will be processed from
# the main dictionary scan
# 碎片整理扫描到set/hash/zset/list时,当它们的长度小于此阀值时,才会将此键值对加入碎片整理,大于此值则放在另一个列表中延迟处理
# active-defrag-max-scan-fields 1000

# Jemalloc background thread for purging will be enabled by default
# 默认启用 jemalloc 用于释放内存(purge)的后台线程
jemalloc-bg-thread yes

# It is possible to pin different threads and processes of Redis to specific
# CPUs in your system, in order to maximize the performances of the server.
# This is useful both in order to pin different Redis threads in different
# CPUs, but also in order to make sure that multiple Redis instances running
# in the same host will be pinned to different CPUs.
#
# Normally you can do this using the "taskset" command, however it is also
# possible to this via Redis configuration directly, both in Linux and FreeBSD.
#
# You can pin the server/IO threads, bio threads, aof rewrite child process, and
# the bgsave child process. The syntax to specify the cpu list is the same as
# the taskset command:
# 你可以将Redis的不同线程和进程绑定到特定的CPU,减少上下文切换,提高CPU L1、L2 Cache 命中率,实现最大化的性能。分为三个模块
# 主线程和I/O线程:负责命令读取、解析、结果返回。命令执行由主线程完成
# bio 线程:负责执行耗时的异步任务,如 close fd、AOF fsync 等
# 后台线程:fork 子进程来执行耗时的命令
# Redis支持分别配置上述的CPU亲和度,默认是关闭的
## I/O线程相关操作绑定到CPU 0、2、4、6,定义基于指定的步长选择CPU
# Set redis server/io threads to cpu affinity 0,2,4,6:
# server_cpulist 0-7:2
#
## bio 线程相关操作绑定到CPU 1、3
# Set bio threads to cpu affinity 1,3:
# bio_cpulist 1,3
#
## aof rewrite 后台进程绑定到CPU 8、9、10、11
# Set aof rewrite child process to cpu affinity 8,9,10,11:
# aof_rewrite_cpulist 8-11
#
## bgsave 后台子进程绑定到 CPU 1、10、11
# Set bgsave child process to cpu affinity 1,10,11
# bgsave_cpulist 1,10-11

# In some cases redis will emit warnings and even refuse to start if it detects
# that the system is in bad state, it is possible to suppress these warnings
# by setting the following config which takes a space delimited list of warnings
# to suppress
# 忽略某些警告
## 忽略ARM64平台的COW-BUG
# ignore-warnings ARM64-COW-BUG

四、了解Redis的RDB快照及AOF机制

4.1 RDB 模式

实现过程:Redis 从主进程先 fork 出一个子进程,使用写时复制机制,子进程将内存的数据保存为一个临时文件,比如dump.rdb.temp,当数据保存完成之后再将上一次保存的RDB文件替换,然后关闭子进程,保证每一次RDB快照的数据都是完整的。直接替换RDB文件时,如果发生突然掉电等问题会导致RDB文件还没有保存完整就停止保存从而会发生数据丢失的情况。

优点:

  • RDB快照保存了某个时间点的数据,可以通过脚本执行bgsave(非阻塞)或者save(阻塞)命令自定义时间点备份,可以保留多个备份,当出现问题可以恢复到不同时间点的版本
  • 最大化 I/O 性能。父进程在保存RDB 文件的时候唯一要做的是fork出一个子进程,之后的操作由这个子进程操作,父进程无需任何的IO操作
  • RDB在大量数据比如几个G的数据,恢复的速度比AOF的快

缺点

  • 不能实时的保存数据,会丢失自上一次执行RDB备份后到当前的内存数据
  • 数据量非常大的时候,父进程 fork 出子进程的时候需要一点时间,可能是毫秒或者秒或者分钟,取决于磁盘IO性能

4.2 AOF 模式

实现过程:同RDB一样,使用写时复制机制。AOF 按照操作顺序依次将操作添加到指定的文件中,默认是每秒钟 fsync 一次,当发生故障时顶多丢失1秒钟的数据,可以设置不同的 fsync 策略,fsync 会在后台线程执行,所以主线程可以继续处理用户的正常请求而不受到写入 AOF 文件的 I/O 影响。同时启用RDB模式和AOF模式时,AOF 模式的优先级高于RDB模式。

优点:

  • 数据安全性相对较高,根据使用的 fsync策略,默认配置 appendfsync everysec ,即每秒钟 fsync,当突然发生故障时,也最多丢失一秒钟的数据
  • Redis可以在 AOF文件体积变得过大时,自动地在后台对AOF进行重写,重写后的新AOF文件包含了恢复当前数据集所需的最小命令集合
  • AOF包含一个格式清晰、易于理解的日志文件用于记录所有的修改操作,可以通过该文件完成数据的重建

缺点:

  • 重复记录相同的操作,AOF 文件要大于 RDB 格式的文件
  • AOF 在恢复大数据集时的速度比 RDB 恢复速度慢
  • 根据 fsync 策略不同,AOF 速度可能会慢于 RDB
  • bug 出现的可能性更多

五、掌握Redis ACL的基本使用

ACL(Access Control List)即访问控制列表,Redis 从 6.0 版本开始支持访问权限控制方式。

特点:

  • 通过ACL,可以控制客户端对不同Redis命令和数据的访问权限
  • 基于ACL的访问控制可以保证Redis中的数据更加安全可靠
  • 相比简单的密码认证,它提供了更丰富灵活的权限管理功能
127.0.0.1:6379> acl help
 1) ACL <subcommand> [<arg> [value] [opt] ...]. Subcommands are:
 2) CAT [<category>]
 3)     List all commands that belong to <category>, or all command categories
 4)     when no category is specified.
 5) DELUSER <username> [<username> ...]
 6)     Delete a list of users.
 7) DRYRUN <username> <command> [<arg> ...]
 8)     Returns whether the user can execute the given command without executing the command.
 9) GETUSER <username>
10)     Get the user's details.
11) GENPASS [<bits>]
12)     Generate a secure 256-bit user password. The optional `bits` argument can
13)     be used to specify a different size.
14) LIST
15)     Show users details in config file format.
16) LOAD
17)     Reload users from the ACL file.
18) LOG [<count> | RESET]
19)     Show the ACL log entries.
20) SAVE
21)     Save the current config to the ACL file.
22) SETUSER <username> <attribute> [<attribute> ...]
23)     Create or modify a user with the specified attributes.
24) USERS
25)     List all the registered usernames.
26) WHOAMI
27)     Return the current connection username.
28) HELP
29)     Print this help.

ACL 规则简介

1、启用和禁用用户:
  on:启用用户
  off:禁用用户,已经创建好的TCP链接不会强制中断
  
2、允许或禁止访问某些Key:
  ~<pattern>:基于 glob 风格的模式匹配可以访问的key,例如 ~* (等价于 allkeys)表示允许访问所有key;~mykey 和 ~Mykey 匹配的是两个不同的key(key的名字区分大小写)。
  resetkeys:清除之前定义的所有key模式,其后再指定的模式才生效,如: ~foo:* ~bar:* resetkeys ~objects:* ,则客户端只能访问匹配 objects:* 模式的 key。

3、允许和禁止调用命令:
  +<command>:将命令添加到用户可以调用的命令列表中,多个命令用空格隔开
  -<command>:将命令从用户可以调用的命令列表中移除
  +@<category>:允许用户调用 <category> 类别中的所有命令,有效类别为@admin,@set,@sortedset等(使用命令acl cat查看),可通过调用ACL CAT命令查看完整列表,特殊类别@all表示所有命令,包括当前和未来版本中存在的所有命令
  -@<category>:禁止用户调用<category> 类别中的所有命令
  +<command>|subcommand:允许使用已禁用命令的特定子命令
  -<command>|subcommand:禁止使用命令的特定子命令(Redis 7.0 及以后版本支持)
  allcommands等于+@all:包括当前存在的命令以及将来通过模块加载的所有命令
  nocommands等于-@all:禁止调用所有命令

默认用户的基本权限

# 当前只有一个default用户
127.0.0.1:6379> ACL LIST 
1) "user default on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"

user default 当前账户名 default
on 账户已经启用
nopass 没有设置密码(如果没有设置密码会出现该字段)
~* 能够访问所有的key
&* 能访问所有的 Pub/Sub 频道
+@all 能执行所有命令

5.1 设置任意权限

# 通过ACL为用户Tom设置任意权限
127.0.0.1:6379> ACL SETUSER Tom on ~* &* +@all >123456
OK
127.0.0.1:6379> acl list
1) "user Tom on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"
2) "user default on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"

# 使用用户Tom登录Redis
root@ubuntu18-server:~# redis-cli -h 192.168.119.171
192.168.119.171:6379> auth Tom 123456
OK
192.168.119.171:6379> get key1
"value1"
192.168.119.171:6379> DEL key1
(integer) 1

5.2 设置指定命令权限

127.0.0.1:6379> ACL SETUSER Jerry on ~* &* +get +set >123456
OK
127.0.0.1:6379> ACL LIST
1) "user Jerry on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* -@all +get +set"
2) "user Tom on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"
3) "user default on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"

# 使用Jerry用户登录
root@ubuntu18-server:~# redis-cli -h 192.168.119.171
192.168.119.171:6379> auth Jerry 123456
OK
192.168.119.171:6379> set name snoopy
OK
192.168.119.171:6379> get name
"snoopy"
192.168.119.171:6379> DEL name
(error) NOPERM User Jerry has no permissions to run the 'del' command

5.3 ACL 规则持久化保存

默认通过命令行创建的用户是无法持久化保存的,在Redis服务下一次重启后会丢失

127.0.0.1:6379> ACL LIST
1) "user Jerry on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* -@all +get +set"
2) "user Tom on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"
3) "user default on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"
127.0.0.1:6379> quit
root@ubuntu18-server11:/apps/redis/etc# systemctl restart redis-server.service         
root@ubuntu18-server11:/apps/redis/etc# redis-cli -a 123456
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
127.0.0.1:6379> ACL LIST
1) "user default on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"

5.3.1 使用Redis配置文件定义ACL规则

# 在redis配置文件添加需要创建的用户
root@ubuntu18-server11:/apps/redis/etc# vim redis.conf
user Tom on ~* &* +@all >123456
user Jerry on ~* &* +get +set >123456

root@ubuntu18-server11:/apps/redis/etc# redis-cli -a 123456
Warning: Using a password with '-a' or '-u' option on the command line interface may not be safe.
127.0.0.1:6379> ACL LIST
1) "user Jerry on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* -@all +get +set"
2) "user Tom on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"
3) "user default on sanitize-payload #8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92 ~* &* +@all"

5.3.2 使用外部文件定义ACL规则

# 定义用户ACL文件
## default 用户也添加进入该文件,否则该用户将没有密码即可登录
root@ubuntu18-server11:/apps/redis/etc# cat /apps/redis/etc/users.acl
user Tom on ~* &* +@all >123456
user Jerry on ~* &* +get +set >123456
user default on ~* &* +@all >123456
user Snoopy on ~* &* +get +set >12345678

# 修改配置文件
root@ubuntu18-server11:/apps/redis/etc# vim redis.conf
aclfile /apps/redis/etc/users.acl
root@ubuntu18-server11:/apps/redis/etc# systemctl restart redis-server.service


root@ubuntu18-server11:/apps/redis/etc# redis-cli 
127.0.0.1:6379> auth Snoopy 12345678
OK

六、掌握基于redis-dump实现redis单机数据的迁移

1、源主机导出数据

root@ubuntu18-server11:~# apt -y install ruby ruby-dev gcc
root@ubuntu18-server11:~# gem install redis-dump

root@ubuntu18-server11:~# redis-dump -a 123456 -u 127.0.0.1:6379 > /tmp/redis.json
root@ubuntu18-server11:~# ll /tmp/redis.json 
-rw-r--r-- 1 root root 7480 Mar  6 22:09 /tmp/redis.json

2、导入数据到目的主机并验证数据

root@ubuntu18-server11:~# cat /tmp/redis.json | redis-load -a 123456 -u 192.168.119.161:6379

root@ubuntu18-server:~# redis-cli -a 123456
127.0.0.1:6379> GET key1
"value1"
127.0.0.1:6379> GET key47
"value47"
posted @ 2024-03-07 14:30  wuhaolam  阅读(6)  评论(0编辑  收藏  举报