Attaching db and wal devices to a ceph osd (squid)
The OSDs were originally deployed without separate devices: only --data /dev/sdx was given, so the wal and db both live on the spinning disk. The wal and db now need to be moved onto an NVMe drive.
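If the OSD was deployed with ceph-volume, the current layout can be confirmed first (optional; ceph-volume lvm list prints the LVs and devices backing each OSD on this host):
ceph-volume lvm list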
- Stop the OSD service
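For a package-based deployment managed by systemd this looks roughly like the following (the unit name differs on cephadm-managed clusters; setting noout first is optional but avoids rebalancing while the OSD is down):
ceph osd set noout
systemctl stop ceph-osd@161   # adjust the OSD id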
- Create a pv and a vg on the NVMe disk that will hold the wal/db partitions (do not use '.' in the pv, vg and lv names)
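For example (the device path /dev/nvme1n1 is an assumption here; the vg name matches the one used in the attach commands further down):
pvcreate /dev/nvme1n1                        # assumed NVMe device path
vgcreate ceph-nvme1n1-meta /dev/nvme1n1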
- Create two separate lvs for each OSD, one for the db and one for the wal; name the lvs so it is clear which OSD they belong to. (The wal is small, a few hundred MB is usually enough; the db is larger, and its lv can be extended later if it fills up, provided the vg has spare capacity reserved. Here I use a 4G wal and a 100G db.)
lvcreate -L 4G -n osd-block-161-wal vgname
lvcreate -L 100G -n osd-block-161-db vgname
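The new lvs can be verified with lvs (using the same vg name as above):
lvs vgname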
- Attach a db and a wal to each OSD. Start by checking the OSD's current labels:
ceph-bluestore-tool --path /var/lib/ceph/osd/ceph-161 --command show-label
inferring bluefs devices from bluestore path
{
    "/var/lib/ceph/osd/ceph-161/block": {
        "osd_uuid": "9cd74e07-e70f-4ddf-af4d-2a122db960e4",
        "size": 16000896466944,
        "btime": "2025-11-21T16:29:11.115306+0800",
        "description": "main",
        "bfm_blocks": "3906468864",
        "bfm_blocks_per_key": "128",
        "bfm_bytes_per_block": "4096",
        "bfm_size": "16000896466944",
        "bluefs": "1",
        "ceph_fsid": "c016de81-fc46-4626-9e7b-4e8e23dddcae",
        "ceph_version_when_created": "ceph version 19.2.2 (0eceb0defba60152a8182f7bd87d164b639885b8) squid (stable)",
        "created_at": "2025-11-21T08:29:41.679247Z",
        "elastic_shared_blobs": "1",
        "epoch": "29",
        "kv_backend": "rocksdb",
        "magic": "ceph osd volume v026",
        "multi": "yes",
        "osd_key": "AQDVIiBpJbJAGhAAp1jsLZ/3Qv8OKY9A40efmQ==",
        "ready": "ready",
        "require_osd_release": "19",
        "type": "bluestore",
        "whoami": "161",
        "locations": [
            "0x0",
            "0x40000000",
            "0x280000000",
            "0x1900000000",
            "0xfa00000000"
        ]
    }
}
At this point there is no wal partition or db partition; they have to be added manually.
Attach the db
ceph-volume lvm new-db --osd-id 161 --osd-fsid 9cd74e07-e70f-4ddf-af4d-2a122db960e4 --target ceph-nvme1n1-meta/osd-block-161-db
--> Making new volume at /dev/ceph-nvme1n1-meta/osd-block-161-db for OSD: 161 (/var/lib/ceph/osd/ceph-161)
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-161/block.db
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-28
--> New volume attached.
Attach the wal
ceph-volume lvm new-wal --osd-id 161 --osd-fsid 9cd74e07-e70f-4ddf-af4d-2a122db960e4 --target ceph-nvme1n1-meta/osd-block-161-wal
--> Making new volume at /dev/ceph-nvme1n1-meta/osd-block-161-wal for OSD: 161 (/var/lib/ceph/osd/ceph-161)
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-161/block.wal
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-27
--> New volume attached.
Check the labels again
ceph-bluestore-tool --path /var/lib/ceph/osd/ceph-161 --command show-label
inferring bluefs devices from bluestore path
{
    "/var/lib/ceph/osd/ceph-161/block": {
        "osd_uuid": "9cd74e07-e70f-4ddf-af4d-2a122db960e4",
        "size": 16000896466944,
        "btime": "2025-11-21T16:29:11.115306+0800",
        "description": "main",
        "bfm_blocks": "3906468864",
        "bfm_blocks_per_key": "128",
        "bfm_bytes_per_block": "4096",
        "bfm_size": "16000896466944",
        "bluefs": "1",
        "ceph_fsid": "c016de81-fc46-4626-9e7b-4e8e23dddcae",
        "ceph_version_when_created": "ceph version 19.2.2 (0eceb0defba60152a8182f7bd87d164b639885b8) squid (stable)",
        "created_at": "2025-11-21T08:29:41.679247Z",
        "elastic_shared_blobs": "1",
        "epoch": "29",
        "kv_backend": "rocksdb",
        "magic": "ceph osd volume v026",
        "multi": "yes",
        "osd_key": "AQDVIiBpJbJAGhAAp1jsLZ/3Qv8OKY9A40efmQ==",
        "ready": "ready",
        "require_osd_release": "19",
        "type": "bluestore",
        "whoami": "161",
        "locations": [
            "0x0",
            "0x40000000",
            "0x280000000",
            "0x1900000000",
            "0xfa00000000"
        ]
    },
    "/var/lib/ceph/osd/ceph-161/block.wal": {
        "osd_uuid": "9cd74e07-e70f-4ddf-af4d-2a122db960e4",
        "size": 21474836480,
        "btime": "2025-12-12T16:47:17.530077+0800",
        "description": "bluefs wal",
        "locations": [
            "0x0"
        ]
    },
    "/var/lib/ceph/osd/ceph-161/block.db": {
        "osd_uuid": "9cd74e07-e70f-4ddf-af4d-2a122db960e4",
        "size": 107374182400,
        "btime": "2025-12-12T16:46:33.059664+0800",
        "description": "bluefs db",
        "locations": [
            "0x0"
        ]
    }
}
You can see that the wal and db devices are now present.
Migrate the existing db data from the hdd to the nvme partition
ceph-bluestore-tool bluefs-bdev-migrate --path /var/lib/ceph/osd/ceph-161 --devs-source /var/lib/ceph/osd/ceph-161/block --dev-target /var/lib/ceph/osd/ceph-161/block.db
inferring bluefs devices from bluestore path
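Before starting the OSD you can optionally double-check what BlueFS now sees; bluefs-bdev-sizes prints each BlueFS device and its size:
ceph-bluestore-tool bluefs-bdev-sizes --path /var/lib/ceph/osd/ceph-161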
Start the OSD service and check the bluefs usage (in the output below, device 0 is the wal, 1 is the db and 2 is the slow/main device):
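Again assuming a systemd-managed, package-based deployment (unset noout if it was set earlier):
systemctl start ceph-osd@161
ceph osd unset noout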
ceph tell osd.161 bluefs stats
0 : device size 0x500000000(20 GiB) : using 0x1900000(25 MiB)
1 : device size 0x1900000000(100 GiB) : using 0x275f00000(9.8 GiB)
2 : device size 0xe8d7fc00000(15 TiB) : using 0x2bf737a9000(2.7 TiB)
RocksDBBlueFSVolumeSelector
>>Settings<< extra=0 B, l0_size=1 GiB, l_base=1 GiB, l_multi=8 B
DEV/LEV    WAL        DB         SLOW       *          *          REAL       FILES
LOG        6 MiB      0 B        0 B        0 B        0 B        3.4 MiB    1
WAL        18 MiB     0 B        0 B        0 B        0 B        3.8 MiB    1
DB         0 B        9.8 GiB    0 B        0 B        0 B        9.6 GiB    209
SLOW       0 B        0 B        0 B        0 B        0 B        0 B        0
TOTAL      24 MiB     9.8 GiB    0 B        0 B        0 B        0 B        211
MAXIMUMS:
LOG        6 MiB      0 B        0 B        0 B        0 B        3.4 MiB
WAL        114 MiB    0 B        0 B        0 B        0 B        111 MiB
DB         0 B        9.9 GiB    0 B        0 B        0 B        9.6 GiB
SLOW       0 B        0 B        0 B        0 B        0 B        0 B
TOTAL      120 MiB    9.9 GiB    0 B        0 B        0 B        0 B
>> SIZE << 19 GiB     95 GiB     14 TiB
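As a further optional check, the OSD metadata should now report dedicated db and wal devices, something along these lines:
ceph osd metadata 161 | grep bluefs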
