cephFS的一主一从架构

1. 创建两个存储池分别用于存储mds的元数据和数据

[root@ceph141 ~]# ceph osd pool create cephfs_data
pool 'cephfs_data' created
[root@ceph141 ~]# ceph osd pool create cephfs_metadata
pool 'cephfs_metadata' created

2. 创建一个文件系统,名称为"nolen-cephfs"(名称自定义)

[root@ceph141 ~]# ceph fs new nolen-cephfs cephfs_metadata cephfs_data
  Pool 'cephfs_data' (id '2') has pg autoscale mode 'on' but is not marked as bulk.
  Consider setting the flag by running
    # ceph osd pool set cephfs_data bulk true
new fs with metadata pool 3 and data pool 2
[root@ceph141 ~]# ceph osd pool set cephfs_data bulk true  #把数据池标记为大容量
set pool 2 bulk to true
[root@ceph141 ~]# 

3. 查看创建的cephFS文件系统

[root@ceph141 ~]# ceph -s
  cluster:
    id:     60f9514a-7e45-11ef-b0aa-5b9fac39e57f
    health: HEALTH_ERR
            1 filesystem is offline
            1 filesystem is online with fewer MDS than max_mds
 
  services:
    mon: 3 daemons, quorum ceph141,ceph142,ceph143 (age 87m)
    mgr: ceph142.kkovzm(active, since 87m), standbys: ceph141.tkvthd
    mds: 0/0 daemons up
    osd: 9 osds: 9 up (since 87m), 9 in (since 38h)
 
  data:
    volumes: 1/1 healthy
    pools:   3 pools, 273 pgs
    objects: 2 objects, 449 KiB
    usage:   326 MiB used, 5.3 TiB / 5.3 TiB avail
    pgs:     273 active+clean

详细信息显示存储池的状态无法正常使用,而且集群是有错误的(HEALTH_ERR)

4. 通过orch为cephFS文件系统部署mds服务

[root@ceph141 ~]# ceph orch apply mds nolen-cephfs
Scheduled mds.nolen-cephfs update...
[root@ceph141 ~]# 

5. 添加多个mds服务器

[root@ceph141 ~]#  ceph orch apply mds nolen-cephfs
Scheduled mds.nolen-cephfs update...
[root@ceph141 ~]# ceph orch daemon add mds nolen-cephfs ceph143
Deployed mds.nolen-cephfs.ceph143.glibjq on host 'ceph143'
[root@ceph141 ~]# ceph mds stat
nolen-cephfs:1 {0=nolen-cephfs.ceph143.glibjq=up:active} 1 up:standby
[root@ceph141 ~]# ceph orch daemon add mds nolen-cephfs ceph141
Deployed mds.nolen-cephfs.ceph141.umpbtn on host 'ceph141'
[root@ceph141 ~]# ceph mds stat
nolen-cephfs:1 {0=nolen-cephfs.ceph143.glibjq=up:active} 2 up:standby

6. 查看cephFS集群的状态和详细信息

[root@ceph141 ~]# ceph fs status nolen-cephfs
nolen-cephfs - 0 clients
================
RANK  STATE                 MDS                   ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  nolen-cephfs.ceph143.glibjq  Reqs:    0 /s    10     13     12      0   
      POOL         TYPE     USED  AVAIL  
cephfs_metadata  metadata  96.0k  1709G  
  cephfs_data      data       0   1709G  
          STANDBY MDS            
nolen-cephfs.ceph141.umpbtn  
MDS version: ceph version 19.2.0 (16063ff2022298c9300e49a547a16ffda59baf13) squid (stable)

[root@ceph141 ~]# ceph -s
  cluster:
    id:     60f9514a-7e45-11ef-b0aa-5b9fac39e57f
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum ceph141,ceph142,ceph143 (age 110m)
    mgr: ceph142.kkovzm(active, since 110m), standbys: ceph141.tkvthd
    mds: 1/1 daemons up, 1 standby
    osd: 9 osds: 9 up (since 110m), 9 in (since 39h)
 
  data:
    volumes: 1/1 healthy
    pools:   3 pools, 273 pgs
    objects: 24 objects, 451 KiB
    usage:   344 MiB used, 5.3 TiB / 5.3 TiB avail
    pgs:     273 active+clean
 

7. cephFS的一主一从架构高可用验证

[root@ceph141 ~]# ceph fs status nolen-cephfs
nolen-cephfs - 0 clients
================
RANK  STATE                 MDS                   ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  nolen-cephfs.ceph143.glibjq  Reqs:    0 /s    10     13     12      0   
      POOL         TYPE     USED  AVAIL  
cephfs_metadata  metadata  96.0k  1709G  
  cephfs_data      data       0   1709G  
          STANDBY MDS            
nolen-cephfs.ceph141.umpbtn  
MDS version: ceph version 19.2.0 (16063ff2022298c9300e49a547a16ffda59baf13) squid (stable)

#将active的MDS服务器节点关机模拟故障
[root@ceph143 ~]# shutdown -h now

#查看cephFS集群状态,发现STANDBY MDS 切换为active状态
[root@ceph141 ~]# ceph fs status nolen-cephfs
nolen-cephfs - 0 clients
================
RANK  STATE                 MDS                   ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  nolen-cephfs.ceph141.umpbtn  Reqs:    0 /s     0      3      2      0   
      POOL         TYPE     USED  AVAIL  
cephfs_metadata  metadata  96.0k  2564G  
  cephfs_data      data       0   1709G  
MDS version: ceph version 19.2.0 (16063ff2022298c9300e49a547a16ffda59baf13) squid (stable)

8. 启动ceph143节点后,再次观察变化

[root@ceph141 ~]# ceph fs status nolen-cephfs
nolen-cephfs - 0 clients
================
RANK  STATE                 MDS                   ACTIVITY     DNS    INOS   DIRS   CAPS  
 0    active  nolen-cephfs.ceph141.umpbtn  Reqs:    0 /s    10     13     12      0   
      POOL         TYPE     USED  AVAIL  
cephfs_metadata  metadata   105k  1709G  
  cephfs_data      data       0   1709G  
          STANDBY MDS            
nolen-cephfs.ceph143.glibjq  
MDS version: ceph version 19.2.0 (16063ff2022298c9300e49a547a16ffda59baf13) squid (stable)
[root@ceph141 ~]#
posted @ 2024-10-01 10:44  Nolen_H  阅读(37)  评论(0)    收藏  举报