1 pool(s) have non-power-of-two pg_num

 

[root@master ceph]# ceph -s
  cluster:
    id:     1508a2da-5991-487a-836c-d6e6527b1dc7
    health: HEALTH_WARN
            1 pool(s) have non-power-of-two pg_num
            mons master,node1,node2 are low on available space

 

[root@master ceph]# ceph osd pool ls detail
pool 2 'dynamics-pool' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 107 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd
        removed_snaps [1~3]
pool 3 'hxl_pool_test' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 114 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd
        removed_snaps [1~3]
pool 6 'cephfs_data_pool' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 128 pgp_num 128 autoscale_mode warn last_change 133 flags hashpspool stripe_width 0 application cephfs
pool 7 'cephfs_metadata_pool' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 60 pgp_num 60 autoscale_mode warn last_change 133 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs

 

解决办法(pg_num 必须是 2 的幂;当前 cephfs_metadata_pool 的 pg_num 为 60,调整为不小于它的最近 2 的幂 64 即可消除告警):

[root@master ceph]# ceph osd pool get cephfs_metadata_pool pg_num
pg_num: 60


[root@master ceph]# ceph osd pool set cephfs_metadata_pool pg_num 64

 

posted @ 2025-10-21 17:50  slnngk  阅读(5)  评论(0)    收藏  举报