Loading

RedHat 9 缩小逻辑卷

RedHat 9 缩小逻辑卷

对逻辑卷进行缩小时,数据丢失的风险会更大。生产环境中缩容时一定要先备份数据。

[root@zhpj ~]# df -hT
Filesystem            Type      Size  Used Avail Use% Mounted on
devtmpfs              devtmpfs  4.0M     0  4.0M   0% /dev
tmpfs                 tmpfs     1.9G     0  1.9G   0% /dev/shm
tmpfs                 tmpfs     779M  9.5M  769M   2% /run
/dev/mapper/rhel-root xfs        17G  6.2G   11G  37% /
/dev/sr0              iso9660   9.0G  9.0G     0 100% /media/rhel9
/dev/sdb1             xfs      1014M  260M  755M  26% /boot
tmpfs                 tmpfs     390M   52K  390M   1% /run/user/42
tmpfs                 tmpfs     390M   36K  390M   1% /run/user/0
[root@zhpj ~]# 

ext4 文件系统缩容

缩容的步骤:

# 先检查磁盘完整性
[root@zhpj ~]# e2fsck -f /dev/data/lv02
e2fsck 1.46.5 (30-Dec-2021)
Pass 1: Checking inodes, blocks, and sizes
Pass 2: Checking directory structure
Pass 3: Checking directory connectivity
Pass 4: Checking reference counts
Pass 5: Checking group summary information
/dev/data/lv02: 11/393216 files (0.0% non-contiguous), 33902/1572864 blocks
[root@zhpj ~]# 

# 缩小文件系统容量
[root@zhpj ~]# resize2fs /dev/data/lv02 2G
resize2fs 1.46.5 (30-Dec-2021)
Resizing the filesystem on /dev/data/lv02 to 524288 (4k) blocks.
The filesystem on /dev/data/lv02 is now 524288 (4k) blocks long.

[root@zhpj ~]# 

[root@zhpj ~]# 
# 此时逻辑卷还是 6GB
[root@zhpj ~]# lvs
  LV   VG   Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  lv01 data -wi-ao----  11.00g                                                
  lv02 data -wi-a-----   6.00g                                                
  root rhel -wi-ao---- <17.00g                                                
  swap rhel -wi-ao----   2.00g                                                
[root@zhpj ~]#

# 但是文件系统已经是 2G 了
[root@zhpj ~]# mkdir /lv02
[root@zhpj ~]# 
[root@zhpj ~]# mount /dev/data/lv02 /lv02/
[root@zhpj ~]# 
[root@zhpj ~]# df -hT
Filesystem            Type      Size  Used Avail Use% Mounted on
devtmpfs              devtmpfs  4.0M     0  4.0M   0% /dev
tmpfs                 tmpfs     1.9G     0  1.9G   0% /dev/shm
tmpfs                 tmpfs     779M  9.5M  769M   2% /run
/dev/mapper/rhel-root xfs        17G  6.2G   11G  37% /
/dev/sr0              iso9660   9.0G  9.0G     0 100% /media/rhel9
/dev/sdb1             xfs      1014M  260M  755M  26% /boot
tmpfs                 tmpfs     390M   52K  390M   1% /run/user/42
tmpfs                 tmpfs     390M   36K  390M   1% /run/user/0
/dev/mapper/data-lv01 xfs        11G  112M   11G   1% /lv01
/dev/mapper/data-lv02 ext4      2.0G   24K  1.9G   1% /lv02
[root@zhpj ~]# 
[root@zhpj ~]# umount /lv02
[root@zhpj ~]# 

# 缩小逻辑卷容量
[root@zhpj ~]# lvreduce -L 2G /dev/data/lv02 
  File system ext4 found on data/lv02.
  File system size (2.00 GiB) is equal to the requested size (2.00 GiB).
  File system reduce is not needed, skipping.
  Size of logical volume data/lv02 changed from 6.00 GiB (1536 extents) to 2.00 GiB (512 extents).
  Logical volume data/lv02 successfully resized.
[root@zhpj ~]# 
[root@zhpj ~]# mount /dev/data/lv02 /lv02/
[root@zhpj ~]#
[root@zhpj ~]# df -hT
Filesystem            Type      Size  Used Avail Use% Mounted on
devtmpfs              devtmpfs  4.0M     0  4.0M   0% /dev
tmpfs                 tmpfs     1.9G     0  1.9G   0% /dev/shm
tmpfs                 tmpfs     779M  9.5M  769M   2% /run
/dev/mapper/rhel-root xfs        17G  6.2G   11G  37% /
/dev/sr0              iso9660   9.0G  9.0G     0 100% /media/rhel9
/dev/sdb1             xfs      1014M  260M  755M  26% /boot
tmpfs                 tmpfs     390M   52K  390M   1% /run/user/42
tmpfs                 tmpfs     390M   36K  390M   1% /run/user/0
/dev/mapper/data-lv01 xfs        11G  112M   11G   1% /lv01
/dev/mapper/data-lv02 ext4      2.0G   24K  1.9G   1% /lv02
[root@zhpj ~]# 

xfs 文件系统不能直接缩容

xfs 文件系统不支持直接缩容,只能通过先备份数据、删除逻辑卷,再重新创建更小的逻辑卷并恢复备份的方式来缩小容量。

  1. 先备份数据,-l 0 表示 0 级完整(全量)备份
[root@zhpj ~]# xfsdump -l 0 -f /root/lv01.dump /lv01
xfsdump: using file dump (drive_simple) strategy
xfsdump: version 3.1.10 (dump format 3.0) - type ^C for status and control

 ============================= dump label dialog ==============================

please enter label for this dump session (timeout in 300 sec)
 -> data_xfs_dump
session label entered: "data_xfs_dump"

 --------------------------------- end dialog ---------------------------------

xfsdump: level 0 dump of zhpj.com:/lv01
xfsdump: dump date: Sun Aug 18 16:37:32 2024
xfsdump: session id: 08ed8a13-a626-4f2d-8eb8-c1f8b66b33c2
xfsdump: session label: "data_xfs_dump"
xfsdump: ino map phase 1: constructing initial dump list
xfsdump: ino map phase 2: skipping (no pruning necessary)
xfsdump: ino map phase 3: skipping (only one dump stream)
xfsdump: ino map construction complete
xfsdump: estimated dump size: 25216 bytes

 ============================= media label dialog =============================

please enter label for media in drive 0 (timeout in 300 sec)
 -> data_xfs_dedia_test
media label entered: "data_xfs_dedia_test"

 --------------------------------- end dialog ---------------------------------

xfsdump: creating dump session media file 0 (media 0, file 0)
xfsdump: dumping ino map
xfsdump: dumping directories
xfsdump: dumping non-directory files
xfsdump: ending media file
xfsdump: media file size 22216 bytes
xfsdump: dump size (non-dir files) : 544 bytes
xfsdump: dump complete: 64 seconds elapsed
xfsdump: Dump Summary:
xfsdump:   stream 0 /root/lv01.dump OK (success)
xfsdump: Dump Status: SUCCESS
[root@zhpj ~]# 

[root@zhpj ~]# ll lv01.dump 
-rw-r--r--. 1 root root 22216 Aug 18 16:38 lv01.dump
[root@zhpj ~]#

  2. 取消挂载,移除 lv01 逻辑卷
[root@zhpj ~]# umount /lv01 
[root@zhpj ~]# 
[root@zhpj ~]# lvremove /dev/data/lv01 
Do you really want to remove active logical volume data/lv01? [y/n]: y
  Logical volume "lv01" successfully removed.
[root@zhpj ~]# 
[root@zhpj ~]# lvs
  LV   VG   Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  lv02 data -wi-ao----   2.00g                                                  
  root rhel -wi-ao---- <17.00g                                                  
  swap rhel -wi-ao----   2.00g                                                  
[root@zhpj ~]#

  3. 重新创建逻辑卷,并格式化为 xfs 文件系统
[root@zhpj ~]# lvcreate -L 5G -n lv01 data
WARNING: xfs signature detected on /dev/data/lv01 at offset 0. Wipe it? [y/n]: y
  Wiping xfs signature on /dev/data/lv01.
  Logical volume "lv01" created.
[root@zhpj ~]# 
[root@zhpj ~]# lvs
  LV   VG   Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  lv01 data -wi-a-----   5.00g                                                  
  lv02 data -wi-ao----   2.00g                                                  
  root rhel -wi-ao---- <17.00g                                                  
  swap rhel -wi-ao----   2.00g                                                  
[root@zhpj ~]#
[root@zhpj ~]# mkfs.xfs /dev/data/lv01 
meta-data=/dev/data/lv01         isize=512    agcount=4, agsize=327680 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=1 inobtcount=1
data     =                       bsize=4096   blocks=1310720, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
[root@zhpj ~]#

  4. 挂载逻辑卷,导入(恢复)之前的备份
[root@zhpj ~]# mount /dev/data/lv01 /lv01/
[root@zhpj ~]# 
[root@zhpj ~]# df -hT
Filesystem            Type      Size  Used Avail Use% Mounted on
devtmpfs              devtmpfs  4.0M     0  4.0M   0% /dev
tmpfs                 tmpfs     1.9G     0  1.9G   0% /dev/shm
tmpfs                 tmpfs     779M  9.5M  769M   2% /run
/dev/mapper/rhel-root xfs        17G  6.2G   11G  37% /
/dev/sr0              iso9660   9.0G  9.0G     0 100% /media/rhel9
/dev/sdb1             xfs      1014M  260M  755M  26% /boot
tmpfs                 tmpfs     390M   52K  390M   1% /run/user/42
tmpfs                 tmpfs     390M   36K  390M   1% /run/user/0
/dev/mapper/data-lv02 ext4      2.0G   24K  1.9G   1% /lv02
/dev/mapper/data-lv01 xfs       5.0G   68M  5.0G   2% /lv01
[root@zhpj ~]#
[root@zhpj ~]# xfsrestore -f /root/lv01.dump /lv01
xfsrestore: using file dump (drive_simple) strategy
xfsrestore: version 3.1.10 (dump format 3.0) - type ^C for status and control
xfsrestore: searching media for dump
xfsrestore: examining media file 0
xfsrestore: dump description: 
xfsrestore: hostname: zhpj.com
xfsrestore: mount point: /lv01
xfsrestore: volume: /dev/mapper/data-lv01
xfsrestore: session time: Sun Aug 18 16:37:32 2024
xfsrestore: level: 0
xfsrestore: session label: "data_xfs_dump"
xfsrestore: media label: "data_xfs_dedia_test"
xfsrestore: file system id: a8fb532d-9734-4d1c-97e2-8f0aef125572
xfsrestore: session id: 08ed8a13-a626-4f2d-8eb8-c1f8b66b33c2
xfsrestore: media id: 2cccb32f-6d72-48b8-a64c-598306f5c975
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: 1 directories and 1 entries processed
xfsrestore: directory post-processing
xfsrestore: restoring non-directory files
xfsrestore: restore complete: 0 seconds elapsed
xfsrestore: Restore Summary:
xfsrestore:   stream 0 /root/lv01.dump OK (success)
xfsrestore: Restore Status: SUCCESS
[root@zhpj ~]# 
[root@zhpj ~]# ll /lv01/
total 4
-rw-r--r--. 1 root root 13 Aug 18 15:46 msg.log
[root@zhpj ~]# 

  5. 如果开机自动挂载使用的是 UUID 方式,需要将 /etc/fstab 中的 UUID 更新为新值(重新格式化后 UUID 已改变,可用 blkid 查看)
[root@zhpj ~]# blkid | grep lv01
/dev/mapper/data-lv01: UUID="b8b3d060-40cd-4d8d-9c41-73ff59af614e" TYPE="xfs"
[root@zhpj ~]# 
[root@zhpj ~]# vim /etc/fstab
[root@zhpj ~]#

# 修改后测试挂载配置
[root@zhpj ~]# umount /lv01
[root@zhpj ~]# 
[root@zhpj ~]# mount -a
[root@zhpj ~]#
posted @ 2024-08-18 16:59  zhpj  阅读(45)  评论(0)    收藏  举报