Redis Memory Usage Troubleshooting
Redis DB Memory Usage Analysis
- Check which DBs the current instance is using
```
redis> info keyspace
# Keyspace
db0:keys=4,expires=0,avg_ttl=0
db5:keys=5,expires=0,avg_ttl=0
db6:keys=24848,expires=24847,avg_ttl=1814866
db7:keys=53216,expires=53209,avg_ttl=103466773
```
- Check the memory usage of the current instance
```
redis> info memory
# Memory
used_memory:227329144
used_memory_human:216.80M
used_memory_rss:361926656
used_memory_rss_human:345.16M
used_memory_peak:2176294808
used_memory_peak_human:2.03G
used_memory_peak_perc:10.45%
used_memory_overhead:50345278
used_memory_startup:7995640
used_memory_dataset:176983866
used_memory_dataset_perc:80.69%
used_memory_lua:66560
used_memory_lua_human:65.00K
used_memory_scripts:10080
used_memory_scripts_human:9.84K
number_of_cached_scripts:16
maxmemory:2147483648
maxmemory_human:2.00G
maxmemory_policy:volatile-lru
mem_fragmentation_ratio:1.59
mem_allocator:jemalloc-5.1.0
active_defrag_running:0
lazyfree_pending_objects:0
oom_err_count:0
```
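Two relationships in the output above are worth a quick sanity check before drilling into individual DBs. mem_fragmentation_ratio is defined as used_memory_rss divided by used_memory, so with the figures above it is 361926656 / 227329144 ≈ 1.59, i.e. the OS-resident footprint is about 1.6x the allocator view. The distance to the limit is used_memory / maxmemory = 227329144 / 2147483648 ≈ 10.6%, well below the 2 GB ceiling at which the volatile-lru eviction policy would start removing expirable keys.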
- List the memory usage of each DB currently used by the instance
```
redis> EVAL "
-- Switch to effects replication so SCAN can be mixed with SELECT/UNLINK inside the script
redis.replicate_commands()

local total = tonumber(ARGV[1])   -- first argument: used_memory from INFO memory
local dbs = {}                    -- the remaining arguments are DB indexes
for i = 2, #ARGV do
    dbs[#dbs+1] = tonumber(ARGV[i])
end

local out = {}
for _, db in ipairs(dbs) do
    -- switch to the DB
    redis.call('SELECT', db)
    local cursor, sum = '0', 0
    -- scan the DB and accumulate per-key usage
    repeat
        local res = redis.call('SCAN', cursor, 'COUNT', 1000)
        cursor = res[1]
        for _, key in ipairs(res[2]) do
            sum = sum + (redis.call('MEMORY', 'USAGE', key) or 0)
        end
    until cursor == '0'
    -- convert to MB and format the percentage
    local mb = sum / 1024 / 1024
    local pct = sum * 100 / total
    out[#out+1] = string.format('DB%d: %.2f MB (%.2f%%)', db, mb, pct)
end

-- (optional) switch back to DB0
redis.call('SELECT', 0)
return out
" 0 227329144 0 5 6 7

-- Returned data
1) DB0: 115.00 MB (53.04%)
2) DB5: 0.03 MB (0.01%)
3) DB6: 5.33 MB (2.46%)
4) DB7: 29.57 MB (13.64%)
```
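Two practical notes on the script above: the first argument after the key count (227329144 here) is the used_memory value from INFO memory and must be updated to match your own instance, and the trailing numbers are the DB indexes reported by INFO keyspace. Because it calls SELECT, it only applies to a standalone (non-cluster) deployment, since Redis Cluster exposes only DB0; and because it walks every key of every listed DB inside a single EVAL, the server is blocked for the whole run, so it is safer to run on a replica or during a low-traffic window.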
Redis Key Memory Usage Analysis
The previous section located memory usage at the Redis DB level; next we analyze how memory is used inside a specific DB.
- Switch to DB7
```
redis> SELECT 7
OK
```
- Rank the top 100 largest keys in this DB
```
EVAL "
-- Scan every key, record {key, usage, ttl} triples, then sort and keep only the top 100
local cursor = '0'
local all = {}
repeat
    local scan = redis.call('SCAN', cursor)
    cursor = scan[1]
    for _, k in ipairs(scan[2]) do
        local u = redis.call('MEMORY', 'USAGE', k) or 0
        local t = redis.call('TTL', k) or -1
        table.insert(all, {k, u, t})
    end
until cursor == '0'

-- Sort by memory usage, descending
table.sort(all, function(a, b) return a[2] > b[2] end)

-- Return only the first 100 entries (change the 100 here if you want more)
local out = {}
for i = 1, math.min(100, #all) do
    table.insert(out, all[i])
end
return out
" 0
```
Explanation
- SCAN loops through all keys in the DB
- MEMORY USAGE key returns the number of bytes the key occupies
- TTL key returns the remaining lifetime in seconds (-1 means no expiry, -2 means the key does not exist)
- Sort by memory usage and keep only the top 100
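As a cross-check on the script above, recent redis-cli builds ship sampling helpers that report large keys without any Lua; a minimal sketch, assuming redis-cli 6.0+ for --memkeys and DB7 as the target:

```
# Largest key of each type, measured by element count / string length; -i 0.1 throttles the scan
redis-cli -n 7 --bigkeys -i 0.1

# Keys consuming the most memory, measured with MEMORY USAGE (redis-cli 6.0+)
redis-cli -n 7 --memkeys -i 0.1
```

Keep in mind that MEMORY USAGE itself is an estimate for large hashes, lists, sets, and sorted sets: it samples a limited number of nested elements and extrapolates, so figures for very large collections are approximate (the SAMPLES 0 option forces a full, slower count).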
- Check the share of memory used by keys with the CASES_WORK prefix
```
EVAL "
local prefix = 'CASES_WORK'
local cursor, sum = '0', 0
repeat
    local res = redis.call('SCAN', cursor, 'MATCH', prefix..'*', 'COUNT', 1000)
    cursor = res[1]
    for _, key in ipairs(res[2]) do
        sum = sum + (redis.call('MEMORY', 'USAGE', key) or 0)
    end
until cursor == '0'

-- Get the total memory used by the instance
local info = redis.call('INFO', 'memory')
local total = tonumber(string.match(info, 'used_memory:(%d+)'))

-- Compute the percentage
local pct = sum * 100 / total

-- Return: {bytes used by prefixed keys, total used_memory, percentage}
return { sum, total, string.format('%.2f%%', pct) }
" 0

-- Returned data
1) 2616464
2) 220134304
3) 1.19%
```
- List the keys with the CASES_WORK prefix, limited to 1000 keys
```
EVAL "
-- COUNT 1000 is only a hint to SCAN, so cap the returned list at 1000 keys explicitly
local limit = 1000
local cursor, all = '0', {}
repeat
    local res = redis.call('SCAN', cursor, 'MATCH', 'CASES_WORK*', 'COUNT', 1000)
    cursor = res[1]
    for _, k in ipairs(res[2]) do
        if #all < limit then table.insert(all, k) end
    end
until cursor == '0' or #all >= limit
return all
" 0

-- Returned data
...
288) CASES_WORK:a9508eba-80b4-4783-91ca-255a906f29d7
289) CASES_WORK:5e321dbf-b655-4371-8275-310779c37737
290) CASES_WORK:38519e5d-7cad-4400-83cc-6271cffe7839
291) CASES_WORK:21158dfc-874f-4e5e-ae17-e43e8e846b45
292) CASES_WORK:10690943-5790-4f31-a65b-3d54e75a5541
293) CASES_WORK:26c3f7a6-a838-4016-bcf6-ebb2e4651eee
294) CASES_WORK:9303d821-d5d2-471c-a83d-c21145afb5a4
295) CASES_WORK:c7a721a1-4557-4372-9804-511a0f07d63c
296) CASES_WORK:b8ffa9f6-0860-4dd3-9133-5622c572133e
297) CASES_WORK:afaa29a1-79d3-489b-850a-05e8a3ae58fb
298) CASES_WORK:487f5ce0-c47d-409b-8776-4e456d433fbd
299) CASES_WORK:c9481de2-c6fa-442b-8f4e-3e817eea7cfc
```
- Delete keys with the CASES_WORK prefix in bulk
```
-- Switch to the target DB
SELECT 7

-- EVAL script that deletes every key with the CASES_WORK* prefix
EVAL "
-- Switch to effects replication so SCAN and UNLINK can be mixed in the script
redis.replicate_commands()
local cursor = '0'
repeat
    -- scan a batch of matching keys
    local res = redis.call('SCAN', cursor, 'MATCH', 'CASES_WORK*', 'COUNT', 1000)
    cursor = res[1]
    for _, k in ipairs(res[2]) do
        redis.call('UNLINK', k)
    end
until cursor == '0'
return 'DELETED ALL CASES_WORK*'
" 0
```
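If blocking the server inside one long EVAL is a concern, the same SCAN + UNLINK pattern can be driven from the client side instead; a sketch, assuming redis-cli on the client host and key names without whitespace:

```
# Stream matching keys out with SCAN and unlink them in groups of 100
redis-cli -n 7 --scan --pattern 'CASES_WORK*' | xargs -r -n 100 redis-cli -n 7 UNLINK
```

Because the deletion is spread over many short UNLINK commands rather than a single script, other clients can interleave their commands between batches.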
- If you are worried that a mistaken operation could affect all of the data, clean up in batches instead; re-run the script below until it returns 0, which means no matching keys remain
```
-- 1. Switch to the target DB first (DB7 in this example)
SELECT 7

-- 2. Run the EVAL below once per batch, changing the trailing number to the per-batch deletion limit you want:
EVAL "
-- Switch to effects replication so SCAN and UNLINK can be mixed in the script
redis.replicate_commands()
local cursor = '0'
local deleted = 0
local limit = tonumber(ARGV[1])
repeat
    local res = redis.call('SCAN', cursor, 'MATCH', 'CASES_WORK*', 'COUNT', 1000)
    cursor = res[1]
    for _, k in ipairs(res[2]) do
        if deleted < limit then
            redis.call('UNLINK', k)
            deleted = deleted + 1
        end
    end
until cursor == '0' or deleted >= limit
return deleted
" 0 1000
```
- Count the keys with the CASES_WORK prefix
```
EVAL "
local cursor = '0'
local total = 0
repeat
    -- scan a batch of keys matching CASES_WORK*
    local res = redis.call('SCAN', cursor, 'MATCH', 'CASES_WORK*', 'COUNT', 1000)
    cursor = res[1]
    -- add the number of keys in this batch
    total = total + #res[2]
until cursor == '0'
return total
" 0

-- Returned data
299
```
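The same count can also be obtained without a script by streaming SCAN results through the shell; a sketch, again assuming redis-cli is available on the client host:

```
# Count keys matching the prefix
redis-cli -n 7 --scan --pattern 'CASES_WORK*' | wc -l
```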