一. ceph-nfs
1. cephfs
1.1 配置启动cephfs
- ceph nfs基于cephfs,需要先配置cephfs
ceph osd pool create cephfs-metadata 16 16
ceph osd pool create cephfs-data 32 32
ceph fs new cephfs cephfs-metadata cephfs-data
ceph orch apply mds cephfs --placement="3 storage01 storage02 storage03"
# 查看mds有三个,两个预备状态
ceph -s
1.2 查看cephfs状态操作
ceph fs ls
ceph fs status
ceph orch ps --daemon_type=mds
ceph mds stat
1.3 cephfs配置挂载
cd /etc/ceph/
ceph-authtool -p /etc/ceph/ceph.client.admin.keyring > admin.key
mount -t ceph storage02:6789,storage03:6789:/ /mnt -o name=admin,secretfile=admin.key
echo "storage02:6789,storage03:6789:/ /mnt ceph name=admin,secretfile=/etc/ceph/admin.key,noatime,_netdev 0 2" >> /etc/fstab
1.4 删除cephfs
# 查看名称
ceph fs ls
ceph fs fail cephfs
ceph fs rm cephfs --yes-i-really-mean-it
vim /var/lib/ceph/ceac4164-3429-11ee-838a-8558ad2c5d49/mon.storage01/config
[global]
# ...(保留文件中原有配置项,在 [global] 段追加下面一行)
mon_allow_pool_delete = true
# 重启生效
docker restart ceph-ceac4164-3429-11ee-838a-8558ad2c5d49-mon-storage01
ceph osd pool delete cephfs-metadata cephfs-metadata --yes-i-really-really-mean-it
ceph osd pool delete cephfs-data cephfs-data --yes-i-really-really-mean-it
2.ceph nfs
2.1 创建nfs
# 集群id可以使用命令 ceph -s 查看
ceph nfs cluster create ceac4164-3429-11ee-838a-8558ad2c5d49 "storage01,storage02" --port 2049
ceph nfs cluster ls
ceph nfs cluster info ceac4164-3429-11ee-838a-8558ad2c5d49
2.2 服务端挂载配置
umount /mnt -l
mount -t ceph storage02:6789,storage03:6789:/ /mnt -o name=admin,secretfile=/etc/ceph/admin.key
mkdir /mnt/data
- nfs操作
- --pseudo-path:nfs别名
- --path:文件系统路径
ceph nfs export create cephfs --cluster-id ceac4164-3429-11ee-838a-8558ad2c5d49 --pseudo-path /nfs01 --fsname cephfs --path=/data
2.3 客户端挂载使用
# 安装客户端
apt install -y nfs-common
# 挂载nfs
mount 10.0.0.10:/nfs01 /mnt/
二.ceph HA nfs
1.创建nfs,并生成vip地址
# 先选取一个vip地址,这里以 10.0.0.100 为例
ceph nfs cluster create mynfs "storage02,storage03" --port 2049 --ingress --virtual_ip 10.0.0.100/24
# 查看状态
ceph orch ls --service_type=nfs
ceph orch ls --service_type=ingress
ceph orch ps --daemon_type=keepalived
ceph orch ps --daemon_type=haproxy
ceph orch ls | grep nfs
2.创建共享nfs
- --cluster-id:集群id名称
- --pseudo-path:虚拟共享点,挂载点
ceph nfs export create cephfs --cluster-id mynfs --pseudo-path /nfs01 --fsname cephfs --path=/
ceph nfs cluster ls
ceph nfs cluster info mynfs
3.挂载使用
mount 10.0.0.100:/nfs01 /mnt/
4.删除nfs
ceph nfs cluster rm mynfs
5.使用yaml创建nfs
vim nfs.yaml
service_type: nfs
service_id: mynfs
placement:
hosts:
- storage02
- storage03
spec:
port: 12345
vim ingress_nfs.yaml
service_type: ingress
service_id: nfs.mynfs
placement:
  count: 2
  hosts:
  - storage02
  - storage03
spec:
backend_service: nfs.mynfs
frontend_port: 2049
monitor_port: 9000
virtual_ip: 10.0.0.100/24
ceph orch apply -i nfs.yaml
ceph orch apply -i ingress_nfs.yaml
ceph nfs export create cephfs --cluster-id mynfs --pseudo-path /nfs01 --fsname cephfs --path=/