# Check the OSD status; osd.5 is down
root@ceph1:/data/ceph-cluster# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.48843  root default                             
-3         0.19537      host ceph4                           
 0    hdd  0.09769          osd.0       up   1.00000  1.00000
 1    hdd  0.09769          osd.1       up   1.00000  1.00000
-5         0.19537      host ceph5                           
 2    hdd  0.09769          osd.2       up   1.00000  1.00000
 3    hdd  0.09769          osd.3       up   1.00000  1.00000
-7         0.09769      host ceph6                           
 4    hdd  0.09769          osd.4       up   1.00000  1.00000
 5               0  osd.5             down   1.00000  1.00000
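
# Hedged extra step (not in the original session): make sure the failed daemon is stopped on its host before removing it; the systemd unit name ceph-osd@5 is assumed from the id in the tree above
root@ceph6:~# systemctl stop ceph-osd@5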

root@ceph1:/data/ceph-cluster# ceph osd out osd.5
root@ceph1:/data/ceph-cluster# ceph osd rm osd.5
root@ceph1:/data/ceph-cluster# ceph osd crush rm osd.5
root@ceph1:/data/ceph-cluster# ceph auth del osd.5
root@ceph1:/data/ceph-cluster# ceph-deploy disk zap ceph6 /dev/vdc
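# Hedged note: the upstream docs usually order this as out -> crush rm -> auth del -> osd rm, but the sequence above also works once osd.5 is down
# Optional check (assumed, not from the original session) that osd.5 no longer appears:
root@ceph1:/data/ceph-cluster# ceph osd tree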

# Check the VGs (run on ceph6)
root@ceph6:~# lvs
  LV                                             VG                                        Attr       LSize    Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  osd-block-c4f1cd25-f6e5-4b0e-a920-eb160d91440a ceph-b00157ed-aca3-4d0f-964a-8050f21c0051 -wi-a----- <100.00g                                                    
  osd-block-65c97b69-a3da-4054-8eb0-6be460ee09b8 ceph-ce2f2ed8-3757-456f-a0f0-ef0886216ce4 -wi-ao---- <100.00g                                                    
root@ceph6:~# lsblk
NAME                                                                                                  MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0                                                                                                    11:0    1 1024M  0 rom  
vda                                                                                                   252:0    0  100G  0 disk 
├─vda1                                                                                                252:1    0    1M  0 part 
├─vda2                                                                                                252:2    0  500M  0 part /boot
└─vda3                                                                                                252:3    0 99.5G  0 part /
vdb                                                                                                   252:16   0  100G  0 disk 
└─ceph--ce2f2ed8--3757--456f--a0f0--ef0886216ce4-osd--block--65c97b69--a3da--4054--8eb0--6be460ee09b8 253:0    0  100G  0 lvm  
vdc                                                                                                   252:32   0  100G  0 disk 
└─ceph--b00157ed--aca3--4d0f--964a--8050f21c0051-osd--block--c4f1cd25--f6e5--4b0e--a920--eb160d91440a 253:1    0  100G  0 lvm  
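
# Assumed check (not in the original session): ceph-volume lists which VG/LV carried each OSD, confirming which one belonged to osd.5 before anything is deleted
root@ceph6:~# ceph-volume lvm list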

# Remove the corresponding VG (run on ceph6)
root@ceph6:~# vgremove ceph-b00157ed-aca3-4d0f-964a-8050f21c0051
Do you really want to remove volume group "ceph-b00157ed-aca3-4d0f-964a-8050f21c0051" containing 1 logical volumes? [y/n]: y
Do you really want to remove and DISCARD active logical volume ceph-b00157ed-aca3-4d0f-964a-8050f21c0051/osd-block-c4f1cd25-f6e5-4b0e-a920-eb160d91440a? [y/n]: y
  Logical volume "osd-block-c4f1cd25-f6e5-4b0e-a920-eb160d91440a" successfully removed
  Volume group "ceph-b00157ed-aca3-4d0f-964a-8050f21c0051" successfully removed
# Check whether the LVM device was removed
root@ceph6:~# lsblk
NAME                                                                                                  MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0                                                                                                    11:0    1 1024M  0 rom  
vda                                                                                                   252:0    0  100G  0 disk 
├─vda1                                                                                                252:1    0    1M  0 part 
├─vda2                                                                                                252:2    0  500M  0 part /boot
└─vda3                                                                                                252:3    0 99.5G  0 part /
vdb                                                                                                   252:16   0  100G  0 disk 
└─ceph--ce2f2ed8--3757--456f--a0f0--ef0886216ce4-osd--block--65c97b69--a3da--4054--8eb0--6be460ee09b8 253:0    0  100G  0 lvm  
vdc                                                                                                   252:32   0  100G  0 disk 
└─ceph--b00157ed--aca3--4d0f--964a--8050f21c0051-osd--block--c4f1cd25--f6e5--4b0e--a920--eb160d91440a 253:1    0  100G  0 lvm  
# The device-mapper entry is still listed, so continue removing it
root@ceph6:~# dmsetup remove ceph--b00157ed--aca3--4d0f--964a--8050f21c0051-osd--block--c4f1cd25--f6e5--4b0e--a920--eb160d91440a
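
# Optional verification (assumed, not from the original session) that the mapping on /dev/vdc is gone:
root@ceph6:~# dmsetup ls
root@ceph6:~# lsblk /dev/vdc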

# Re-add the OSD
root@ceph1:/data/ceph-cluster# ceph-deploy --overwrite-conf osd create ceph6 --data /dev/vdc
root@ceph1:/data/ceph-cluster# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.58612  root default                             
-3         0.19537      host ceph4                           
 0    hdd  0.09769          osd.0       up   1.00000  1.00000
 1    hdd  0.09769          osd.1       up   1.00000  1.00000
-5         0.19537      host ceph5                           
 2    hdd  0.09769          osd.2       up   1.00000  1.00000
 3    hdd  0.09769          osd.3       up   1.00000  1.00000
-7         0.19537      host ceph6                           
 4    hdd  0.09769          osd.4       up   1.00000  1.00000
 5    hdd  0.09769          osd.5       up   1.00000  1.00000
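
# Optional follow-up (assumed, not from the original session): watch the cluster health while data backfills onto the new osd.5
root@ceph1:/data/ceph-cluster# ceph -s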
